Merge "Bug 1850 - The earlier patch missed out on removing the unnecessary call to...
author Madhu Venugopal <mavenugo@gmail.com>
Mon, 22 Sep 2014 06:27:54 +0000 (06:27 +0000)
committer Gerrit Code Review <gerrit@opendaylight.org>
Mon, 22 Sep 2014 06:27:54 +0000 (06:27 +0000)
142 files changed:
features/mdsal/pom.xml
features/mdsal/src/main/resources/features.xml
features/netconf/src/main/resources/features.xml
opendaylight/archetypes/opendaylight-configfile-archetype/pom.xml
opendaylight/archetypes/opendaylight-karaf-distro-archetype/pom.xml
opendaylight/archetypes/opendaylight-karaf-features/pom.xml
opendaylight/commons/opendaylight/pom.xml
opendaylight/distribution/opendaylight-karaf-resources/src/main/resources/bin/setenv
opendaylight/distribution/opendaylight-karaf-resources/src/main/resources/etc/custom.properties
opendaylight/karaf-branding/pom.xml
opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithExecutorServiceBenchmark.java
opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/ComponentActivator.java
opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/FromSalConversionsUtils.java
opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/InventoryAndReadAdapter.java
opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/MDFlowMapping.java
opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/NodeMapping.java
opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/ToSalConversionsUtils.java
opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/FromSalConversionsUtilsTest.java [new file with mode: 0644]
opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/NodeMappingTest.java
opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/TestFromSalConversionsUtils.java
opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/TestToSalConversionsUtils.java
opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/ToSalConversionsUtilsTest.java [new file with mode: 0644]
opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/FlowForwarder.java
opendaylight/md-sal/pom.xml
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/TestDriver.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ConfigParams.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/DefaultConfigParamsImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLog.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/ApplyLogEntries.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MessageCollectorActor.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MockAkkaJournal.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MockSnapshotStore.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/resources/application.conf
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingTranslatedTransactionChain.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/serialization/NormalizedNodeSerializer.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/serialization/PathArgumentSerializer.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/serialization/ValueSerializer.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/serialization/ValueType.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/xml/codec/XmlStreamUtils.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/xml/codec/XmlUtils.java
opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/ThreadExecutorStatsMXBeanImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardManager.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TerminationMonitor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStats.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ActorContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/config/yang/config/distributed_datastore_provider/DistributedConfigDataStoreProviderModule.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/config/yang/config/distributed_datastore_provider/DistributedOperationalDataStoreProviderModule.java
opendaylight/md-sal/sal-distributed-datastore/src/main/yang/distributed-datastore-provider.yang
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractActorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardManagerTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/InMemoryJournal.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/InMemorySnapshotStore.java
opendaylight/md-sal/sal-distributed-datastore/src/test/resources/application.conf
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/HashMapDataStoreModule.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/HashMapDataStoreModuleFactory.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/HashMapDataStore.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/HashMapDataStoreTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/yang/opendaylight-dom-broker-impl.yang
opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/xsql/XSQLProvider.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/ResolveDataChangeEventsTask.java
opendaylight/md-sal/sal-karaf-xsql/pom.xml
opendaylight/md-sal/sal-karaf-xsql/src/main/resources/OSGI-INF/blueprint/shell-log.xml [new file with mode: 0644]
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/listener/NetconfSessionCapabilities.java
opendaylight/md-sal/sal-netconf-connector/src/test/java/org/opendaylight/controller/sal/connect/netconf/listener/NetconfSessionCapabilitiesTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderFactory.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RoutedRpcListener.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcBroker.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcListener.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/TerminationMonitor.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStore.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java
opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/rest/api/RestconfService.java
opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/rest/impl/RestconfDocumentedExceptionMapper.java
opendaylight/md-sal/samples/l2switch/pom.xml
opendaylight/md-sal/topology-manager/pom.xml
opendaylight/md-sal/topology-manager/src/main/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporter.java
opendaylight/md-sal/topology-manager/src/main/java/org/opendaylight/md/controller/topology/manager/OperationProcessor.java
opendaylight/md-sal/topology-manager/src/test/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporterTest.java [new file with mode: 0644]
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/mapping/attributes/fromxml/ObjectNameAttributeReadingStrategy.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/mapping/attributes/fromxml/SimpleIdentityRefAttributeReadingStrategy.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/mapping/config/Config.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/mapping/config/ServiceRegistryWrapper.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/mapping/config/Services.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/operations/editconfig/EditConfig.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/operations/get/Get.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/operations/runtimerpc/RuntimeRpc.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/transactions/TransactionProvider.java
opendaylight/netconf/config-persister-impl/src/main/java/org/opendaylight/controller/netconf/persist/impl/ConfigPusherImpl.java
opendaylight/netconf/netconf-impl/src/main/java/org/opendaylight/controller/netconf/impl/osgi/NetconfMonitoringServiceImpl.java
opendaylight/netconf/netconf-impl/src/main/java/org/opendaylight/controller/netconf/impl/osgi/NetconfOperationRouterImpl.java
opendaylight/netconf/netconf-it/pom.xml
opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITSecureTest.java
opendaylight/netconf/netconf-it/src/test/resources/logback-test.xml
opendaylight/netconf/netconf-monitoring/src/main/java/org/opendaylight/controller/netconf/monitoring/xml/model/MonitoringSchema.java
opendaylight/netconf/netconf-monitoring/src/test/java/org/opendaylight/controller/netconf/monitoring/xml/JaxBSerializerTest.java
opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHanderReader.java [new file with mode: 0644]
opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandler.java
opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandlerWriter.java [new file with mode: 0644]
opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandlerTest.java
opendaylight/netconf/netconf-ssh/src/main/java/org/opendaylight/controller/netconf/ssh/osgi/NetconfSSHActivator.java
opendaylight/netconf/netconf-usermanager/src/main/java/org/opendaylight/controller/netconf/auth/usermanager/AuthProviderImpl.java
opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/mapping/AbstractNetconfOperation.java
opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageUtil.java
opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/osgi/NetconfConfigUtil.java
opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/xml/XmlElement.java
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractLastNetconfOperationTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractNetconfOperationTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractSingletonNetconfOperationTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessageAdditionalHeaderTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessageTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageHeaderTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageUtilTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/SendErrorExceptionUtilTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/osgi/NetconfConfigUtilTest.java [new file with mode: 0644]
opendaylight/northbound/networkconfiguration/neutron/pom.xml
opendaylight/sal/api/src/main/java/org/opendaylight/controller/sal/packet/ICMP.java
opendaylight/sal/api/src/main/java/org/opendaylight/controller/sal/packet/IPv4.java
opendaylight/sal/api/src/test/java/org/opendaylight/controller/sal/packet/ICMPTest.java
opendaylight/sal/api/src/test/java/org/opendaylight/controller/sal/packet/IPv4Test.java

index 960dfb37a176bbe0aeed7f03cc1019aff1b5994f..299e5b6707184ffacde75bf0d3fc246d5ecde07d 100644 (file)
@@ -13,6 +13,7 @@
 
   <properties>
     <features.file>features.xml</features.file>
+    <org.json.version>20131018</org.json.version>
   </properties>
 
   <dependencies>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-dom-xsql</artifactId>
     </dependency>
+
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-karaf-xsql</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-dom-xsql-config</artifactId>
       <type>xml</type>
       <classifier>config</classifier>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>clustering-it-config</artifactId>
+      <version>${mdsal.version}</version>
+      <type>xml</type>
+      <classifier>testmoduleshardconf</classifier>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>clustering-it-config</artifactId>
+      <version>${mdsal.version}</version>
+      <type>xml</type>
+      <classifier>testmoduleconf</classifier>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-rest-docgen</artifactId>
index da246b63e300d8a32070dd063cd2fcab4a6298a3..2a988ced05f805b5faf3b6ca31fea09255924883 100644 (file)
@@ -18,6 +18,7 @@
     <feature name='odl-mdsal-broker' version='${project.version}' description="OpenDaylight :: MDSAL :: Broker">
         <feature version='${yangtools.version}'>odl-yangtools-common</feature>
         <feature version='${yangtools.version}'>odl-yangtools-binding</feature>
+        <feature version='${yangtools.version}'>odl-yangtools-models</feature>
         <feature version='${mdsal.version}'>odl-mdsal-common</feature>
         <feature version='${config.version}'>odl-config-startup</feature>
         <feature version='${config.version}'>odl-config-netty</feature>
@@ -35,6 +36,9 @@
     <feature name='odl-restconf' version='${project.version}' description="OpenDaylight :: Restconf">
         <feature version='${mdsal.version}'>odl-mdsal-broker</feature>
         <feature>war</feature>
+        <!-- presently we need sal-remote to be listed BEFORE sal-rest-connector because sal-rest-connector
+             has a yang file which augments a yang file in sal-remote, and order seems to matter -->
+        <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
         <bundle>mvn:org.opendaylight.controller/sal-rest-connector/${project.version}</bundle>
         <bundle>mvn:com.google.code.gson/gson/${gson.version}</bundle>
         <bundle>mvn:org.opendaylight.yangtools/yang-data-codec-gson/${yangtools.version}</bundle>
@@ -47,7 +51,6 @@
         <bundle>mvn:io.netty/netty-common/${netty.version}</bundle>
         <bundle>mvn:io.netty/netty-handler/${netty.version}</bundle>
         <bundle>mvn:io.netty/netty-transport/${netty.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
         <configfile finalname="${config.configfile.directory}/${config.restconf.configfile}">mvn:org.opendaylight.controller/sal-rest-connector-config/${mdsal.version}/xml/config</configfile>
     </feature>
     <feature name='odl-toaster' version='${project.version}' description="OpenDaylight :: Toaster">
@@ -62,6 +65,7 @@
     <feature name ='odl-mdsal-xsql' version='${project.version}'>
         <feature version='${project.version}'>odl-mdsal-broker</feature>
         <bundle>mvn:org.opendaylight.controller/sal-dom-xsql/${project.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller/sal-karaf-xsql/${project.version}</bundle>
         <configfile finalname="${config.configfile.directory}/${config.xsql.configfile}">mvn:org.opendaylight.controller/sal-dom-xsql-config/${project.version}/xml/config</configfile>
     </feature>
     <feature name ='odl-mdsal-apidocs' version='${project.version}'>
index 743dae663e5f871e4d50f3e57ce86593e0742bcb..444f20865b565d48d1dd2e55d6b3362db051cf76 100644 (file)
@@ -11,8 +11,6 @@
     <feature version='${project.version}'>odl-netconf-mapping-api</feature>
     <feature version='${project.version}'>odl-netconf-util</feature>
     <feature version='${project.version}'>odl-netconf-impl</feature>
-    <feature version='${project.version}'>odl-netconf-tcp</feature>
-    <feature version='${project.version}'>odl-netconf-ssh</feature>
     <feature version='${project.version}'>odl-config-netconf-connector</feature>
     <feature version='${project.version}'>odl-netconf-netty-util</feature>
     <feature version='${project.version}'>odl-netconf-client</feature>
index 38c86164e9b88067847f0d8012965c0f3aba154c..56342218a0b8205555961832f60d3fca4846a809 100644 (file)
   <distributionManagement>
     <repository>
       <id>opendaylight-release</id>
-      <url>http://nexus.opendaylight.org/content/repositories/opendaylight.release/</url>
+      <url>${nexusproxy}/repositories/opendaylight.release/</url>
     </repository>
     <snapshotRepository>
       <id>opendaylight-snapshot</id>
-      <url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+      <url>${nexusproxy}/repositories/opendaylight.snapshot/</url>
     </snapshotRepository>
     <site>
       <id>website</id>
-      <url>dav:http://nexus.opendaylight.org/content/sites/site/sal-parent</url>
+      <url>dav:${nexusproxy}/sites/site/sal-parent</url>
     </site>
   </distributionManagement>
 </project>
index 8883c642951d4a253fcccdb4649ee7dc1b7f5339..9081ce797b177729018e53e9a5bcd930796ab330 100644 (file)
@@ -1,7 +1,11 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
-
+  <parent>
+     <groupId>org.opendaylight.controller.archetypes</groupId>
+     <artifactId>archetypes-parent</artifactId>
+     <version>0.1.1-SNAPSHOT</version>
+  </parent>
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>opendaylight-karaf-distro-archetype</artifactId>
   <version>1.0.0-SNAPSHOT</version>
index 4973a69537597fa7a6a285d5e4626f41ed2efb73..264402a3b1740c892efe51569c33af9ebbd39829 100644 (file)
@@ -2,6 +2,11 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
 
+  <parent>
+     <groupId>org.opendaylight.controller.archetypes</groupId>
+     <artifactId>archetypes-parent</artifactId>
+     <version>0.1.1-SNAPSHOT</version>
+  </parent>
   <groupId>org.opendaylight.controller</groupId>
   <artifactId>opendaylight-karaf-features-archetype</artifactId>
   <version>1.0.0-SNAPSHOT</version>
index 4240db939a604c44424ffe55323f8f52ab114a2d..a98afc8d19cd78597f978f370dfbb9e3cdd48054 100644 (file)
         <artifactId>sal-dom-xsql</artifactId>
         <version>${mdsal.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>sal-karaf-xsql</artifactId>
+        <version>${mdsal.version}</version>
+      </dependency>
       <dependency>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>sal-dom-xsql-config</artifactId>
   <repositories>
 
     <!-- OpenDayLight Repo Mirror -->
+    <!-- NOTE: URLs need to be hardcoded in the repository section because we have
+         parent poms that do NOT exist in this project and thus need to be pulled
+         down from the repository. To override these URLs you should use the
+         mirror section in your local settings.xml file. -->
     <repository>
       <releases>
         <enabled>true</enabled>
       <url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
     </pluginRepository>
   </pluginRepositories>
+
+  <!-- distribution management only runs when you run mvn deploy,
+       that is, when you are deploying compiled artifacts to a
+       maven repository. In that case logic dictates that you already
+       compiled and thus already have the necessary parent pom files
+       that do not exist in this project pulled down to your local
+       .m2. That way the variables can be resolved and artifacts can
+       be uploaded when running mvn deploy. -->
   <distributionManagement>
     <!-- OpenDayLight Released artifact -->
     <repository>
       <id>opendaylight-release</id>
-      <url>http://nexus.opendaylight.org/content/repositories/opendaylight.release/</url>
+      <url>${nexusproxy}/repositories/opendaylight.release/</url>
     </repository>
     <!-- OpenDayLight Snapshot artifact -->
     <snapshotRepository>
       <id>opendaylight-snapshot</id>
-      <url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+      <url>${nexusproxy}/repositories/opendaylight.snapshot/</url>
     </snapshotRepository>
     <!-- Site deployment -->
     <site>
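
[Editorial note: the NOTE comment above recommends overriding the hardcoded repository URLs through the mirror section of your local settings.xml. A minimal sketch of that approach — the mirror id and URL below are illustrative placeholders, not part of this change:

    <!-- ~/.m2/settings.xml (illustrative values) -->
    <settings>
      <mirrors>
        <mirror>
          <id>local-odl-mirror</id>
          <!-- "*" redirects all repository lookups; narrow this to
               specific repository ids if you only want to override some -->
          <mirrorOf>*</mirrorOf>
          <url>http://localhost:8081/repositories/opendaylight-proxy/</url>
        </mirror>
      </mirrors>
    </settings>

Mirrors affect artifact resolution (the repositories/pluginRepositories sections), which is exactly the parent-pom download path the comment describes.]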
index 4f240447b44171475a3049db94a9e4d65705a1f3..947c65f6bd175a46f8c633d17d7bb305558438a7 100755 (executable)
 # export KARAF_ETC  # Karaf etc  folder
 # export KARAF_OPTS # Additional available Karaf options
 # export KARAF_DEBUG # Enable debug mode
-if [ "x$JAVA_MAX_PERM_MEM" == "x" ]; then
+if [ "x$JAVA_MAX_PERM_MEM" = "x" ]; then
     export JAVA_MAX_PERM_MEM="512m"
 fi
-if [ "x$JAVA_MAX_MEM" == "x" ]; then
+if [ "x$JAVA_MAX_MEM" = "x" ]; then
     export JAVA_MAX_MEM="2048m"
 fi
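
[Editorial note: context for the setenv change above: `==` inside `[ ]` is a bash extension, while POSIX `test` only defines `=`, so the old form breaks under shells such as dash. A minimal sketch; the `-z` variant is an editorial alternative, not part of the patch:

    # Portable across POSIX shells:
    if [ "x$JAVA_MAX_MEM" = "x" ]; then
        export JAVA_MAX_MEM="2048m"
    fi

    # Equivalent idiom using -z (true when the string is empty):
    if [ -z "$JAVA_MAX_MEM" ]; then
        export JAVA_MAX_MEM="2048m"
    fi
]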
 
index e0e2759b370e00d68602a647b1258d5fa6e0aeb5..cdb65420135d1f71ebe1e66eb67dec8efda762dd 100644 (file)
@@ -127,3 +127,9 @@ java.util.logging.config.file=configuration/tomcat-logging.properties
 #Hosttracker hostsdb key scheme setting
 hosttracker.keyscheme=IP
 
+# LISP Flow Mapping configuration
+# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings
+lisp.mappingOverwrite = true
+# Enable the Solicit-Map-Request (SMR) mechanism
+lisp.smr = false
+
index 727f224fa11ba542abf43dde2e31f44174fbcfc0..444e77057a88b8c32f98a5ecad7d58ffa413f1b2 100644 (file)
@@ -2,7 +2,12 @@
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 
     <modelVersion>4.0.0</modelVersion>
-
+    <parent>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>releasepom</artifactId>
+      <version>0.1.2-SNAPSHOT</version>
+      <relativePath>../..</relativePath>
+    </parent>
     <groupId>org.opendaylight.controller</groupId>
     <artifactId>karaf.branding</artifactId>
     <version>1.0.0-SNAPSHOT</version>
index 4b9d66f4f226c880f2f1bff53adcd4c55162302e..77a4966ec3c1a063d650e0c240018e70bd10af7d 100644 (file)
@@ -7,19 +7,21 @@
  */
 package org.opendaylight.controller.md.sal.dom.store.benchmark;
 
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
 import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import org.openjdk.jmh.annotations.Level;
-import org.openjdk.jmh.annotations.Setup;
-import org.openjdk.jmh.annotations.TearDown;
-import org.openjdk.jmh.annotations.Fork;
-import org.openjdk.jmh.annotations.Scope;
-import org.openjdk.jmh.annotations.State;
 import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
 import org.openjdk.jmh.annotations.Mode;
 import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
 
 /**
  * Benchmark for testing of performance of write operations for InMemoryDataStore. The instance
@@ -41,14 +43,15 @@ public class InMemoryDataStoreWithExecutorServiceBenchmark extends AbstractInMem
     private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
     private static final int MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE = 5000;
 
+    @Override
     @Setup(Level.Trial)
     public void setUp() throws Exception {
         final String name = "DS_BENCHMARK";
         final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
             MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL");
 
-        final ExecutorService domStoreExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
-            MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE, "DOMStore-" + name );
+        final ListeningExecutorService domStoreExecutor = MoreExecutors.listeningDecorator(SpecialExecutors.newBoundedSingleThreadExecutor(
+            MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE, "DOMStore-" + name ));
 
         domStore = new InMemoryDOMDataStore(name, domStoreExecutor,
             dataChangeListenerExecutor);
@@ -57,6 +60,7 @@ public class InMemoryDataStoreWithExecutorServiceBenchmark extends AbstractInMem
         initTestNode();
     }
 
+    @Override
     @TearDown
     public void tearDown() {
         schemaContext = null;
index d71858e5c36750cb33b206b6b6977ffcf604ffc8..4fc0cf75b33702574f12ecb605d87d5392566086 100644 (file)
@@ -139,6 +139,7 @@ public class ComponentActivator extends ComponentActivatorAbstractBase {
     protected Object[] getImplementations() {
         return new Object[] {
                 dataPacketService,
+                inventory,
         };
     }
 
@@ -148,6 +149,8 @@ public class ComponentActivator extends ComponentActivatorAbstractBase {
             _instanceConfigure((ComponentActivator)imp, c, containerName);
         } else if (imp instanceof DataPacketServiceAdapter) {
             _instanceConfigure((DataPacketServiceAdapter)imp, c, containerName);
+        } else if (imp instanceof InventoryAndReadAdapter) {
+            _instanceConfigure((InventoryAndReadAdapter)imp, c, containerName);
         } else {
             throw new IllegalArgumentException(String.format("Unhandled implementation class %s", imp.getClass()));
         }
@@ -215,6 +218,22 @@ public class ComponentActivator extends ComponentActivatorAbstractBase {
                 .setRequired(false));
     }
 
+    private void _instanceConfigure(final InventoryAndReadAdapter imp, final Component it, String containerName) {
+        it.setInterface(new String[] {
+                IPluginInInventoryService.class.getName(),
+                IPluginInReadService.class.getName(),
+        }, properties());
+
+        it.add(createServiceDependency()
+                .setService(IPluginOutReadService.class)
+                .setCallbacks("setReadPublisher", "unsetReadPublisher")
+                .setRequired(false));
+        it.add(createServiceDependency()
+                .setService(IPluginOutInventoryService.class)
+                .setCallbacks("setInventoryPublisher", "unsetInventoryPublisher")
+                .setRequired(false));
+    }
+
     private void _configure(final TopologyAdapter imp, final Component it) {
         it.setInterface(IPluginInTopologyService.class.getName(), properties());
 
index 1b648dc98c36c0c6e05bfce6740b294dd730e8b8..ecf1a94c18c8123dbcffe5c9d398ce69566f356d 100644 (file)
@@ -61,10 +61,16 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026
 
 import com.google.common.net.InetAddresses;
 
-public class FromSalConversionsUtils {
+/**
+ * MD-SAL to AD-SAL conversions collection
+ */
+public final class FromSalConversionsUtils {
 
-    private FromSalConversionsUtils() {
+    /** http://en.wikipedia.org/wiki/IPv4#Packet_structure (end of octet number 1, bits 14 and 15) */
+    public static final int ENC_FIELD_BIT_SIZE = 2;
 
+    private FromSalConversionsUtils() {
+        throw new IllegalAccessError("forcing no instance for factory");
     }
 
     @SuppressWarnings("unused")
@@ -469,5 +475,12 @@ public class FromSalConversionsUtils {
         return true;
     }
 
+    /**
+     * @param nwDscp NW-DSCP
+     * @return shifted to NW-TOS (with empty ECN part)
+     */
+    public static int dscpToTos(int nwDscp) {
+        return (short) (nwDscp << ENC_FIELD_BIT_SIZE);
+    }
 
 }
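
[Editorial note: the new dscpToTos helper relies on the TOS octet layout: the 6-bit DSCP value occupies the upper six bits and the 2-bit ECN field the lower two, so converting DSCP to a full TOS byte is a left shift by ENC_FIELD_BIT_SIZE (2). A small worked sketch of the arithmetic; the values match the new unit test added below:

    public class DscpToTosDemo {
        public static void main(String[] args) {
            // DSCP occupies the upper 6 bits of the TOS octet; ECN the lower 2.
            System.out.println(63 << 2);   // 252 == dscpToTos(63)
            System.out.println(1 << 2);    // 4   == dscpToTos(1)
            // Inputs outside the 6-bit DSCP range are not masked:
            System.out.println(64 << 2);   // 256 -- no longer fits in one octet
        }
    }
]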
index e2c13867754d187f26f9f9a1dc4414a2b4114572..1530e909ad769ae8b7dcdf808aa47ad5bfe39783 100644 (file)
@@ -48,7 +48,6 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.ta
 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.AggregateFlowStatisticsUpdate;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowStatisticsData;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.FlowsStatisticsUpdate;
-import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetAllFlowsStatisticsFromAllFlowTablesInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.GetFlowStatisticsFromFlowTableInputBuilder;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsListener;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.statistics.rev130819.OpendaylightFlowStatisticsService;
@@ -241,20 +240,20 @@ public class InventoryAndReadAdapter implements IPluginInReadService, IPluginInI
      * @param id Table id
      * @return Table contents, or null if not present
      */
-    private Table readConfigTable(final Node node, final short id) {
+    private Table readOperationalTable(final Node node, final short id) {
         final InstanceIdentifier<Table> tableRef = InstanceIdentifier.builder(Nodes.class)
-                .child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class, InventoryMapping.toNodeKey(node))
+                .child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class, NodeMapping.toNodeKey(node))
                 .augmentation(FlowCapableNode.class)
                 .child(Table.class, new TableKey(id))
                 .build();
 
-        return (Table) startChange().readConfigurationData(tableRef);
+        return (Table) startChange().readOperationalData(tableRef);
     }
 
     @Override
     public List<FlowOnNode> readAllFlow(final Node node, final boolean cached) {
         final ArrayList<FlowOnNode> output = new ArrayList<>();
-        final Table table = readConfigTable(node, OPENFLOWV10_TABLE_ID);
+        final Table table = readOperationalTable(node, OPENFLOWV10_TABLE_ID);
         if (table != null) {
             final List<Flow> flows = table.getFlow();
             LOG.trace("Number of flows installed in table 0 of node {} : {}", node, flows.size());
@@ -268,12 +267,6 @@ public class InventoryAndReadAdapter implements IPluginInReadService, IPluginInI
             }
         }
 
-        // TODO (main): Shall we send request to the switch? It will make async request to the switch.
-        // Once the plugin receives a response, it will let the adaptor know through onFlowStatisticsUpdate()
-        // If we assume that md-sal statistics manager will always be running, then it is not required
-        // But if not, then sending request will collect the latest data for adaptor at least.
-        getFlowStatisticsService().getAllFlowsStatisticsFromAllFlowTables(
-                new GetAllFlowsStatisticsFromAllFlowTablesInputBuilder().setNode(NodeMapping.toNodeRef(node)).build());
         return output;
     }
 
@@ -334,7 +327,7 @@ public class InventoryAndReadAdapter implements IPluginInReadService, IPluginInI
     @Override
     public FlowOnNode readFlow(final Node node, final org.opendaylight.controller.sal.flowprogrammer.Flow targetFlow, final boolean cached) {
         FlowOnNode ret = null;
-        final Table table = readConfigTable(node, OPENFLOWV10_TABLE_ID);
+        final Table table = readOperationalTable(node, OPENFLOWV10_TABLE_ID);
         if (table != null) {
             final List<Flow> flows = table.getFlow();
             InventoryAndReadAdapter.LOG.trace("Number of flows installed in table 0 of node {} : {}", node, flows.size());
@@ -386,7 +379,7 @@ public class InventoryAndReadAdapter implements IPluginInReadService, IPluginInI
     @Override
     public NodeTableStatistics readNodeTable(final NodeTable nodeTable, final boolean cached) {
         NodeTableStatistics nodeStats = null;
-        final Table table = readConfigTable(nodeTable.getNode(), (short) nodeTable.getID());
+        final Table table = readOperationalTable(nodeTable.getNode(), (short) nodeTable.getID());
         if (table != null) {
             final FlowTableStatisticsData tableStats = table.getAugmentation(FlowTableStatisticsData.class);
             if (tableStats != null) {
index 5837e35b3a65b7bb4a7b8fc7c0ae2517cbe07db6..00511bc74449adfac7c1d1f3bb0cc968ecb95162 100644 (file)
@@ -315,7 +315,7 @@ public final class MDFlowMapping {
 
     private static SetNwTosActionCase _toAction(final SetNwTos sourceAction) {
         return new SetNwTosActionCaseBuilder()
-        .setSetNwTosAction(new SetNwTosActionBuilder().setTos(sourceAction.getNwTos()).build())
+        .setSetNwTosAction(new SetNwTosActionBuilder().setTos(FromSalConversionsUtils.dscpToTos(sourceAction.getNwTos())).build())
         .build();
     }
 
index b873f8a9fe060ff93c0279fbef0ad3cf59d0ab84..bcb2367e7a35685b8a660bf9b7d2a00c9effcb1b 100644 (file)
@@ -168,7 +168,22 @@ public final class NodeMapping {
      * @return
      */
     private static NodeId toNodeId(org.opendaylight.controller.sal.core.Node aDNode) {
-        return new NodeId(aDNode.getType() + ":" + String.valueOf(aDNode.getID()));
+        String targetPrefix = null;
+        if (NodeIDType.OPENFLOW.equals(aDNode.getType())) {
+                targetPrefix = OPENFLOW_ID_PREFIX;
+        } else {
+            targetPrefix = aDNode.getType() + ":";
+        }
+
+        return new NodeId(targetPrefix + String.valueOf(aDNode.getID()));
+    }
+
+    /**
+     * @param aDNode
+     * @return md-sal {@link NodeKey}
+     */
+    public static NodeKey toNodeKey(org.opendaylight.controller.sal.core.Node aDNode) {
+        return new NodeKey(toNodeId(aDNode));
     }
 
     public static String toNodeConnectorType(final NodeConnectorId ncId, final NodeId nodeId) {
index 28dd57c3b7986fecabae2c2fa9749e8d58fbd36e..dcc1a4660b5b71690419f377c84852f192b3c0dc 100644 (file)
@@ -128,7 +128,7 @@ public class ToSalConversionsUtils {
     private static final Logger LOG = LoggerFactory.getLogger(ToSalConversionsUtils.class);
 
     private ToSalConversionsUtils() {
-
+        throw new IllegalAccessError("forcing no instance for factory");
     }
 
     public static Flow toFlow(org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow source, Node node) {
@@ -287,7 +287,7 @@ public class ToSalConversionsUtils {
             } else if (sourceAction instanceof SetNwTosActionCase) {
                 Integer tos = ((SetNwTosActionCase) sourceAction).getSetNwTosAction().getTos();
                 if (tos != null) {
-                    targetAction.add(new SetNwTos(tos));
+                    targetAction.add(new SetNwTos(ToSalConversionsUtils.tosToNwDscp(tos)));
                 }
             } else if (sourceAction instanceof SetTpDstActionCase) {
                 PortNumber port = ((SetTpDstActionCase) sourceAction).getSetTpDstAction().getPort();
@@ -643,4 +643,12 @@ public class ToSalConversionsUtils {
 
         return mac;
     }
+
+    /**
+     * @param nwTos NW-TOS
+     * @return shifted to NW-DSCP
+     */
+    public static int tosToNwDscp(int nwTos) {
+        return (short) (nwTos >>> FromSalConversionsUtils.ENC_FIELD_BIT_SIZE);
+    }
 }
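
[Editorial note: tosToNwDscp is the inverse shift, so the two helpers round-trip for any in-range DSCP value; the lower two (ECN) bits of a TOS byte are simply discarded. A quick sketch of the invariant, consistent with the assertions in the new tests below:

    public class TosDscpRoundTrip {
        public static void main(String[] args) {
            for (int dscp = 0; dscp <= 63; dscp++) {
                int tos = dscp << 2;                  // dscpToTos
                assert (tos >>> 2) == dscp;           // tosToNwDscp recovers the DSCP
            }
            // ECN bits are dropped: 253 and 252 map to the same DSCP
            System.out.println(253 >>> 2);            // 63
            System.out.println(252 >>> 2);            // 63
        }
    }
]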
diff --git a/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/FromSalConversionsUtilsTest.java b/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/FromSalConversionsUtilsTest.java
new file mode 100644 (file)
index 0000000..b09e816
--- /dev/null
@@ -0,0 +1,31 @@
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.compatibility.test;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.sal.compatibility.FromSalConversionsUtils;
+
+/**
+ * Test of {@link FromSalConversionsUtils}
+ */
+public class FromSalConversionsUtilsTest {
+
+    /**
+     * Test method for {@link org.opendaylight.controller.sal.compatibility.FromSalConversionsUtils#dscpToTos(int)}.
+     */
+    @Test
+    public void testDscpToTos() {
+        Assert.assertEquals(0, FromSalConversionsUtils.dscpToTos(0));
+        Assert.assertEquals(4, FromSalConversionsUtils.dscpToTos(1));
+        Assert.assertEquals(252, FromSalConversionsUtils.dscpToTos(63));
+        Assert.assertEquals(256, FromSalConversionsUtils.dscpToTos(64));
+        Assert.assertEquals(-4, FromSalConversionsUtils.dscpToTos(-1));
+    }
+
+}
index a776ef231292c1fa562cf19b7a1fe7571d7e8a06..759e69f5eb980844ed89b8dca581f57637486313 100644 (file)
@@ -196,6 +196,19 @@ public class NodeMappingTest {
         Assert.assertEquals(0xCC4E241C4A000000L, NodeMapping.openflowFullNodeIdToLong("14721743935839928320").longValue());
     }
 
+    /**
+     * Test method for
+     * {@link org.opendaylight.controller.sal.compatibility.NodeMapping#toNodeKey(org.opendaylight.controller.sal.core.Node)}
+     * .
+     * @throws ConstructionException
+     */
+    @Test
+    public void testToNodeKey() throws ConstructionException {
+        org.opendaylight.controller.sal.core.Node aDNode = new org.opendaylight.controller.sal.core.Node(NodeIDType.OPENFLOW, 42L);
+        NodeKey nodeKey = NodeMapping.toNodeKey(aDNode);
+        Assert.assertEquals("openflow:42", nodeKey.getId().getValue());
+    }
+
     /**
      * @param nodeId
      * @param portId
index 9f787b7e391010cee640d6b0582c4e0447ded4c2..98df90112deedfaa0815031802242bfae503784a 100644 (file)
@@ -293,7 +293,7 @@ public class TestFromSalConversionsUtils {
                     }
                     assertTrue("Ipv4 address wasn't found.", ipv4AddressFound);
                 } else if (innerAction instanceof SetNwTosActionCase) {
-                    assertEquals("Wrong TOS in SetNwTosAction.", (Integer) 63, ((SetNwTosActionCase) innerAction).getSetNwTosAction().getTos());
+                    assertEquals("Wrong TOS in SetNwTosAction.", (Integer) 252, ((SetNwTosActionCase) innerAction).getSetNwTosAction().getTos());
                 } else if (innerAction instanceof SetNwDstActionCase) {
                     Address address = ((SetNwDstActionCase) innerAction).getSetNwDstAction().getAddress();
                     boolean ipv4AddressFound = false;
index 60b77394c1f15e8055d93afa259862dccd33c5a3..16d0bb424d02746921d16f5c321915232f5219c8 100644 (file)
@@ -499,7 +499,7 @@ public class TestToSalConversionsUtils {
 
     private void prepareActionSetNwTos(SetNwTosActionCaseBuilder wrapper) {
         SetNwTosActionBuilder setNwTosActionBuilder = new SetNwTosActionBuilder();
-        setNwTosActionBuilder.setTos(63);
+        setNwTosActionBuilder.setTos(252);
         wrapper.setSetNwTosAction(setNwTosActionBuilder.build());
     }
 
diff --git a/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/ToSalConversionsUtilsTest.java b/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/ToSalConversionsUtilsTest.java
new file mode 100644 (file)
index 0000000..aa25c18
--- /dev/null
@@ -0,0 +1,31 @@
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.compatibility.test;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.sal.compatibility.ToSalConversionsUtils;
+
+/**
+ * Test of {@link ToSalConversionsUtils}
+ */
+public class ToSalConversionsUtilsTest {
+
+    /**
+     * Test method for {@link org.opendaylight.controller.sal.compatibility.ToSalConversionsUtils#tosToNwDscp(int)}.
+     */
+    @Test
+    public void testTosToNwDscp() {
+        Assert.assertEquals(0, ToSalConversionsUtils.tosToNwDscp(0));
+        Assert.assertEquals(0, ToSalConversionsUtils.tosToNwDscp(1));
+        Assert.assertEquals(1, ToSalConversionsUtils.tosToNwDscp(4));
+        Assert.assertEquals(63, ToSalConversionsUtils.tosToNwDscp(252));
+        Assert.assertEquals(63, ToSalConversionsUtils.tosToNwDscp(253));
+        Assert.assertEquals(-1, ToSalConversionsUtils.tosToNwDscp(-1));
+    }
+}
index 9951bf744810dd3228de7815bb3c240d2b950e81..698dbcb0d1593a912cc1caba527fb9a12e174cfd 100644 (file)
@@ -7,7 +7,6 @@
  */
 package org.opendaylight.controller.frm.impl;
 
-import com.google.common.base.Preconditions;
 import org.opendaylight.controller.frm.ForwardingRulesManager;
 import org.opendaylight.controller.md.sal.binding.api.DataBroker;
 import org.opendaylight.controller.md.sal.binding.api.DataChangeListener;
@@ -33,6 +32,8 @@ import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Preconditions;
+
 /**
  * GroupForwarder
  * It implements {@link org.opendaylight.controller.md.sal.binding.api.DataChangeListener}}
@@ -52,8 +53,27 @@ public class FlowForwarder extends AbstractListeningCommiter<Flow> {
     public FlowForwarder (final ForwardingRulesManager manager, final DataBroker db) {
         super(manager, Flow.class);
         Preconditions.checkNotNull(db, "DataBroker can not be null!");
-        this.listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.CONFIGURATION,
-                getWildCardPath(), FlowForwarder.this, DataChangeScope.SUBTREE);
+        registrationListener(db, 5);
+    }
+
+    private void registrationListener(final DataBroker db, int i) {
+        try {
+            listenerRegistration = db.registerDataChangeListener(LogicalDatastoreType.CONFIGURATION,
+                    getWildCardPath(), FlowForwarder.this, DataChangeScope.SUBTREE);
+        } catch (final Exception e) {
+            if (i >= 1) {
+                try {
+                    Thread.sleep(100);
+                } catch (InterruptedException e1) {
+                    LOG.error("Thread interrupted '{}'", e1);
+                    Thread.currentThread().interrupt();
+                }
+                registrationListener(db, --i);
+            } else {
+                LOG.error("FRM Flow DataChange listener registration fail!", e);
+                throw new IllegalStateException("FlowForwarder registration Listener fail! System needs restart.", e);
+            }
+        }
     }
 
     @Override
@@ -61,7 +81,7 @@ public class FlowForwarder extends AbstractListeningCommiter<Flow> {
         if (listenerRegistration != null) {
             try {
                 listenerRegistration.close();
-            } catch (Exception e) {
+            } catch (final Exception e) {
                 LOG.error("Error by stop FRM FlowChangeListener.", e);
             }
             listenerRegistration = null;
@@ -80,7 +100,7 @@ public class FlowForwarder extends AbstractListeningCommiter<Flow> {
             builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
             builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
             builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
-            this.provider.getSalFlowService().removeFlow(builder.build());
+            provider.getSalFlowService().removeFlow(builder.build());
         }
     }
 
@@ -99,7 +119,7 @@ public class FlowForwarder extends AbstractListeningCommiter<Flow> {
             builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).build());
             builder.setOriginalFlow((new OriginalFlowBuilder(original)).build());
 
-            this.provider.getSalFlowService().updateFlow(builder.build());
+            provider.getSalFlowService().updateFlow(builder.build());
         }
     }
 
@@ -116,7 +136,7 @@ public class FlowForwarder extends AbstractListeningCommiter<Flow> {
             builder.setFlowRef(new FlowRef(identifier));
             builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
             builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
-            this.provider.getSalFlowService().addFlow(builder.build());
+            provider.getSalFlowService().addFlow(builder.build());
         }
     }
 
@@ -129,7 +149,7 @@ public class FlowForwarder extends AbstractListeningCommiter<Flow> {
     private boolean tableIdValidationPrecondition (final TableKey tableKey, final Flow flow) {
         Preconditions.checkNotNull(tableKey, "TableKey can not be null or empty!");
         Preconditions.checkNotNull(flow, "Flow can not be null or empty!");
-        if (flow.getTableId() != tableKey.getId()) {
+        if (! tableKey.getId().equals(flow.getTableId())) {
             LOG.error("TableID in URI tableId={} and in palyload tableId={} is not same.",
                     flow.getTableId(), tableKey.getId());
             return false;
index 71a0de9939a2693e2a93a62a1611ff852ab59b2c..a9c81b93536dccd22284debff0feccec7d9b9140 100644 (file)
@@ -77,6 +77,7 @@
 
     <!-- XSQL -->
     <module>sal-dom-xsql</module>
+    <module>sal-karaf-xsql</module>
     <module>sal-dom-xsql-config</module>
 
     <!-- Yang Test Models for MD-SAL -->
index c4ff108611d9fbdb177f2ef4ace98bb030d69991..04df7785ad8d27c69e77f9e0cd85a5cbb64208b0 100644 (file)
@@ -11,8 +11,10 @@ package org.opendaylight.controller.cluster.example;
 import akka.actor.ActorRef;
 import akka.actor.Props;
 import akka.japi.Creator;
+
 import com.google.common.base.Optional;
 import com.google.protobuf.ByteString;
+
 import org.opendaylight.controller.cluster.example.messages.KeyValue;
 import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
 import org.opendaylight.controller.cluster.example.messages.PrintRole;
@@ -67,11 +69,15 @@ public class ExampleActor extends RaftActor {
             }
 
         } else if (message instanceof PrintState) {
-            LOG.debug("State of the node:{} has entries={}, {}",
-                getId(), state.size(), getReplicatedLogState());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("State of the node:{} has entries={}, {}",
+                    getId(), state.size(), getReplicatedLogState());
+            }
 
         } else if (message instanceof PrintRole) {
-            LOG.debug("{} = {}, Peers={}", getId(), getRaftState(),getPeers());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("{} = {}, Peers={}", getId(), getRaftState(), getPeers());
+            }
 
         } else {
             super.onReceiveCommand(message);
@@ -106,7 +112,9 @@ public class ExampleActor extends RaftActor {
         } catch (Exception e) {
            LOG.error("Exception in applying snapshot", e);
         }
-        LOG.debug("Snapshot applied to state :" + ((HashMap) state).size());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Snapshot applied to state :" + ((HashMap) state).size());
+        }
     }
 
     private ByteString fromObject(Object snapshot) throws Exception {
@@ -159,4 +167,24 @@ public class ExampleActor extends RaftActor {
     @Override public String persistenceId() {
         return getId();
     }
+
+    @Override
+    protected void startLogRecoveryBatch(int maxBatchSize) {
+    }
+
+    @Override
+    protected void appendRecoveredLogEntry(Payload data) {
+    }
+
+    @Override
+    protected void applyCurrentLogRecoveryBatch() {
+    }
+
+    @Override
+    protected void onRecoveryComplete() {
+    }
+
+    @Override
+    protected void applyRecoverySnapshot(ByteString snapshot) {
+    }
 }
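
[Editorial note: the logging changes above follow a standard guard pattern: even with parameterized messages, the arguments are evaluated before the call, so wrapping in isDebugEnabled() avoids computing expensive values such as getReplicatedLogState() when debug logging is off. A minimal sketch of the pattern; the SLF4J logger and the expensiveSummary helper are illustrative, not taken from this change:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardedLogging {
        private static final Logger LOG = LoggerFactory.getLogger(GuardedLogging.class);

        void report(String id) {
            // Parameterized logging defers message formatting, but
            // expensiveSummary() would still run unconditionally here:
            // LOG.debug("State of {}: {}", id, expensiveSummary());

            // Guarding skips the expensive call entirely when debug is off:
            if (LOG.isDebugEnabled()) {
                LOG.debug("State of {}: {}", id, expensiveSummary());
            }
        }

        private String expensiveSummary() {   // illustrative placeholder
            return "computed";
        }
    }
]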
index 978ea91089dbcb1a530c9d66f6878ffa06579e2d..cb51a8951a54f158bac9b7bf1cab769662568fc3 100644 (file)
@@ -7,7 +7,6 @@ import org.opendaylight.controller.cluster.example.messages.PrintRole;
 import org.opendaylight.controller.cluster.example.messages.PrintState;
 import org.opendaylight.controller.cluster.raft.ConfigParams;
 import org.opendaylight.controller.cluster.raft.client.messages.AddRaftPeer;
-import org.opendaylight.controller.cluster.raft.client.messages.RemoveRaftPeer;
 
 import java.io.BufferedReader;
 import java.io.InputStreamReader;
@@ -196,11 +195,6 @@ public class TestDriver {
 
         actorSystem.stop(actorRef);
         actorRefs.remove(actorName);
-
-        for (ActorRef actor : actorRefs.values()) {
-            actor.tell(new RemoveRaftPeer(actorName), null);
-        }
-
         allPeers.remove(actorName);
     }
 
@@ -209,11 +203,6 @@ public class TestDriver {
         allPeers.put(actorName, address);
 
         ActorRef exampleActor = createExampleActor(actorName);
-
-        for (ActorRef actor : actorRefs.values()) {
-            actor.tell(new AddRaftPeer(actorName, address), null);
-        }
-
         actorRefs.put(actorName, exampleActor);
 
         addClientsToNode(actorName, 1);
index b436bce50061f98d0362ce3df4336c4279977d63..2be4a0c36f91bb41be4d21f79f928ef1c0bfe71f 100644 (file)
@@ -18,13 +18,14 @@ import java.util.List;
  */
 public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
 
-    protected List<ReplicatedLogEntry> journal;
+    // We define this as ArrayList so we can use ensureCapacity.
+    protected ArrayList<ReplicatedLogEntry> journal;
     protected ByteString snapshot;
     protected long snapshotIndex = -1;
     protected long snapshotTerm = -1;
 
     // to be used for rollback during save snapshot failure
-    protected List<ReplicatedLogEntry> snapshottedJournal;
+    protected ArrayList<ReplicatedLogEntry> snapshottedJournal;
     protected ByteString previousSnapshot;
     protected long previousSnapshotIndex = -1;
     protected long previousSnapshotTerm = -1;
@@ -106,6 +107,11 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
         journal.add(replicatedLogEntry);
     }
 
+    @Override
+    public void increaseJournalLogCapacity(int amount) {
+        journal.ensureCapacity(journal.size() + amount);
+    }
+
     @Override
     public List<ReplicatedLogEntry> getFrom(long logEntryIndex) {
         return getFrom(logEntryIndex, journal.size());
@@ -208,7 +214,6 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
 
     @Override
     public void snapshotCommit() {
-        snapshottedJournal.clear();
         snapshottedJournal = null;
         previousSnapshotIndex = -1;
         previousSnapshotTerm = -1;
@@ -218,7 +223,6 @@ public abstract class AbstractReplicatedLogImpl implements ReplicatedLog {
     @Override
     public void snapshotRollback() {
         snapshottedJournal.addAll(journal);
-        journal.clear();
         journal = snapshottedJournal;
         snapshottedJournal = null;
 
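
Declaring the journal as ArrayList rather than List is what makes increaseJournalLogCapacity work: ensureCapacity grows the backing array once up front instead of repeatedly while entries are appended. A hedged sketch of a caller reserving space before a bulk append (this call site is illustrative, not part of the hunk above; recoveredEntries is an assumed local):

    // Reserve capacity once so the backing array is not re-allocated several
    // times while a large batch of entries is appended.
    int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
    replicatedLog.increaseJournalLogCapacity(batchSize);
    for (ReplicatedLogEntry entry : recoveredEntries) {
        replicatedLog.append(entry);
    }
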
index ed6439d8c33bceb545927fd6c2f892665b160f8a..bff2a2779733761bff220e7e1223e85c46d18a73 100644 (file)
@@ -26,7 +26,7 @@ public interface ConfigParams {
      *
      * @return long
      */
-    public long getSnapshotBatchCount();
+    long getSnapshotBatchCount();
 
     /**
      * The interval at which a heart beat message will be sent to the remote
@@ -34,7 +34,7 @@ public interface ConfigParams {
      *
      * @return FiniteDuration
      */
-    public FiniteDuration getHeartBeatInterval();
+    FiniteDuration getHeartBeatInterval();
 
     /**
      * The interval in which a new election would get triggered if no leader is found
@@ -43,7 +43,7 @@ public interface ConfigParams {
      *
      * @return FiniteDuration
      */
-    public FiniteDuration getElectionTimeOutInterval();
+    FiniteDuration getElectionTimeOutInterval();
 
     /**
      * The maximum election time variance. The election is scheduled using both
@@ -51,10 +51,15 @@ public interface ConfigParams {
      *
      * @return int
      */
-    public int getElectionTimeVariance();
+    int getElectionTimeVariance();
 
     /**
      * The size (in bytes) of the snapshot chunk sent from Leader
      */
-    public int getSnapshotChunkSize();
+    int getSnapshotChunkSize();
+
+    /**
+     * The number of journal log entries to batch on recovery before applying.
+     */
+    int getJournalRecoveryLogBatchSize();
 }
index 9d06f6360473097beefbbce34962d7433f447f88..dc4145358aa332febafa690baa549fbfccfa91b2 100644 (file)
@@ -20,12 +20,14 @@ public class DefaultConfigParamsImpl implements ConfigParams {
 
     private static final int SNAPSHOT_BATCH_COUNT = 20000;
 
+    private static final int JOURNAL_RECOVERY_LOG_BATCH_SIZE = 1000;
+
     /**
      * The maximum election time variance
      */
     private static final int ELECTION_TIME_MAX_VARIANCE = 100;
 
-    private final int SNAPSHOT_CHUNK_SIZE = 2048 * 1000; //2MB
+    private static final int SNAPSHOT_CHUNK_SIZE = 2048 * 1000; //2MB
 
 
     /**
@@ -39,17 +41,32 @@ public class DefaultConfigParamsImpl implements ConfigParams {
         new FiniteDuration(100, TimeUnit.MILLISECONDS);
 
 
+    private FiniteDuration heartBeatInterval = HEART_BEAT_INTERVAL;
+    private long snapshotBatchCount = SNAPSHOT_BATCH_COUNT;
+    private int journalRecoveryLogBatchSize = JOURNAL_RECOVERY_LOG_BATCH_SIZE;
+
+    public void setHeartBeatInterval(FiniteDuration heartBeatInterval) {
+        this.heartBeatInterval = heartBeatInterval;
+    }
+
+    public void setSnapshotBatchCount(long snapshotBatchCount) {
+        this.snapshotBatchCount = snapshotBatchCount;
+    }
+
+    public void setJournalRecoveryLogBatchSize(int journalRecoveryLogBatchSize) {
+        this.journalRecoveryLogBatchSize = journalRecoveryLogBatchSize;
+    }
+
     @Override
     public long getSnapshotBatchCount() {
-        return SNAPSHOT_BATCH_COUNT;
+        return snapshotBatchCount;
     }
 
     @Override
     public FiniteDuration getHeartBeatInterval() {
-        return HEART_BEAT_INTERVAL;
+        return heartBeatInterval;
     }
 
-
     @Override
     public FiniteDuration getElectionTimeOutInterval() {
         // returns 2 times the heart beat interval
@@ -65,4 +82,9 @@ public class DefaultConfigParamsImpl implements ConfigParams {
     public int getSnapshotChunkSize() {
         return SNAPSHOT_CHUNK_SIZE;
     }
+
+    @Override
+    public int getJournalRecoveryLogBatchSize() {
+        return journalRecoveryLogBatchSize;
+    }
 }
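
With the former constants now acting as defaults behind instance fields, the parameters can be tuned per actor. A short usage sketch, mirroring what testRaftActorRecovery further below does with the heartbeat interval (the actorSystem name is assumed):

    // Tune recovery batching and stretch the heartbeat interval (effectively
    // disabling elections), then pass the config to the actor through its Props.
    DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
    config.setJournalRecoveryLogBatchSize(500);
    config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
    ActorRef actor = actorSystem.actorOf(MockRaftActor.props("member-1",
            Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config)));
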
index 190f1bd409e6c69ad8f8d8df30a6eaee7a04b3a7..64fa7496042466e58bd51cf0a488c265898866da 100644 (file)
@@ -20,7 +20,9 @@ import akka.persistence.SnapshotOffer;
 import akka.persistence.SnapshotSelectionCriteria;
 import akka.persistence.UntypedPersistentActor;
 import com.google.common.base.Optional;
+import com.google.common.base.Stopwatch;
 import com.google.protobuf.ByteString;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
@@ -38,7 +40,6 @@ import org.opendaylight.controller.cluster.raft.client.messages.RemoveRaftPeer;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
-
 import java.io.Serializable;
 import java.util.Map;
 
@@ -96,7 +97,7 @@ public abstract class RaftActor extends UntypedPersistentActor {
      * This context should NOT be passed directly to any other actor it is
      * only to be consumed by the RaftActorBehaviors
      */
-    private RaftActorContext context;
+    private final RaftActorContext context;
 
     /**
      * The in-memory journal
@@ -107,6 +108,10 @@ public abstract class RaftActor extends UntypedPersistentActor {
 
     private volatile boolean hasSnapshotCaptureInitiated = false;
 
+    private Stopwatch recoveryTimer;
+
+    private int currentRecoveryBatchCount;
+
     public RaftActor(String id, Map<String, String> peerAddresses) {
         this(id, peerAddresses, Optional.<ConfigParams>absent());
     }
@@ -121,71 +126,167 @@ public abstract class RaftActor extends UntypedPersistentActor {
             LOG);
     }
 
-    @Override public void onReceiveRecover(Object message) {
-        if (message instanceof SnapshotOffer) {
-            LOG.info("SnapshotOffer called..");
-            SnapshotOffer offer = (SnapshotOffer) message;
-            Snapshot snapshot = (Snapshot) offer.snapshot();
-
-            // Create a replicated log with the snapshot information
-            // The replicated log can be used later on to retrieve this snapshot
-            // when we need to install it on a peer
-            replicatedLog = new ReplicatedLogImpl(snapshot);
-
-            context.setReplicatedLog(replicatedLog);
-            context.setLastApplied(snapshot.getLastAppliedIndex());
-
-            LOG.info("Applied snapshot to replicatedLog. " +
-                    "snapshotIndex={}, snapshotTerm={}, journal-size={}",
-                replicatedLog.snapshotIndex, replicatedLog.snapshotTerm,
-                replicatedLog.size()
-            );
+    private void initRecoveryTimer() {
+        if(recoveryTimer == null) {
+            recoveryTimer = new Stopwatch();
+            recoveryTimer.start();
+        }
+    }
 
-            // Apply the snapshot to the actors state
-            applySnapshot(ByteString.copyFrom(snapshot.getState()));
+    @Override
+    public void preStart() throws Exception {
+        LOG.info("Starting recovery for {} with journal batch size {}", persistenceId(),
+                context.getConfigParams().getJournalRecoveryLogBatchSize());
+        super.preStart();
+    }
 
+    @Override
+    public void onReceiveRecover(Object message) {
+        if (message instanceof SnapshotOffer) {
+            onRecoveredSnapshot((SnapshotOffer)message);
         } else if (message instanceof ReplicatedLogEntry) {
-            ReplicatedLogEntry logEntry = (ReplicatedLogEntry) message;
-
-            // Apply State immediately
-            replicatedLog.append(logEntry);
-            applyState(null, "recovery", logEntry.getData());
-            context.setLastApplied(logEntry.getIndex());
-            context.setCommitIndex(logEntry.getIndex());
+            onRecoveredJournalLogEntry((ReplicatedLogEntry)message);
+        } else if (message instanceof ApplyLogEntries) {
+            onRecoveredApplyLogEntries((ApplyLogEntries)message);
         } else if (message instanceof DeleteEntries) {
             replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
         } else if (message instanceof UpdateElectionTerm) {
-            context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(), ((UpdateElectionTerm) message).getVotedFor());
+            context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
+                    ((UpdateElectionTerm) message).getVotedFor());
         } else if (message instanceof RecoveryCompleted) {
-            LOG.debug(
-                "RecoveryCompleted - Switching actor to Follower - " +
-                    "Persistence Id =  " + persistenceId() +
-                    " Last index in log:{}, snapshotIndex={}, snapshotTerm={}, " +
-                    "journal-size={}",
-                replicatedLog.lastIndex(), replicatedLog.snapshotIndex,
-                replicatedLog.snapshotTerm, replicatedLog.size());
-            currentBehavior = switchBehavior(RaftState.Follower);
-            onStateChanged();
+            onRecoveryCompletedMessage();
         }
     }
 
+    private void onRecoveredSnapshot(SnapshotOffer offer) {
+        LOG.debug("SnapshotOffer called..");
+
+        initRecoveryTimer();
+
+        Snapshot snapshot = (Snapshot) offer.snapshot();
+
+        // Create a replicated log with the snapshot information
+        // The replicated log can be used later on to retrieve this snapshot
+        // when we need to install it on a peer
+        replicatedLog = new ReplicatedLogImpl(snapshot);
+
+        context.setReplicatedLog(replicatedLog);
+        context.setLastApplied(snapshot.getLastAppliedIndex());
+        context.setCommitIndex(snapshot.getLastAppliedIndex());
+
+        Stopwatch timer = new Stopwatch();
+        timer.start();
+
+        // Apply the snapshot to the actors state
+        applyRecoverySnapshot(ByteString.copyFrom(snapshot.getState()));
+
+        timer.stop();
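+        // The journal size is concatenated into the template below because Akka's
+        // LoggingAdapter formats at most four {} arguments.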
+        LOG.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size=" +
+                replicatedLog.size(), persistenceId(), timer.toString(),
+                replicatedLog.snapshotIndex, replicatedLog.snapshotTerm);
+    }
+
+    private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Received ReplicatedLogEntry for recovery: {}", logEntry.getIndex());
+        }
+
+        replicatedLog.append(logEntry);
+    }
+
+    private void onRecoveredApplyLogEntries(ApplyLogEntries ale) {
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Received ApplyLogEntries for recovery, applying to state: {} to {}",
+                    context.getLastApplied() + 1, ale.getToIndex());
+        }
+
+        for (long i = context.getLastApplied() + 1; i <= ale.getToIndex(); i++) {
+            batchRecoveredLogEntry(replicatedLog.get(i));
+        }
+
+        context.setLastApplied(ale.getToIndex());
+        context.setCommitIndex(ale.getToIndex());
+    }
+
+    private void batchRecoveredLogEntry(ReplicatedLogEntry logEntry) {
+        initRecoveryTimer();
+
+        int batchSize = context.getConfigParams().getJournalRecoveryLogBatchSize();
+        if(currentRecoveryBatchCount == 0) {
+            startLogRecoveryBatch(batchSize);
+        }
+
+        appendRecoveredLogEntry(logEntry.getData());
+
+        if(++currentRecoveryBatchCount >= batchSize) {
+            endCurrentLogRecoveryBatch();
+        }
+    }
+
+    private void endCurrentLogRecoveryBatch() {
+        applyCurrentLogRecoveryBatch();
+        currentRecoveryBatchCount = 0;
+    }
+
+    private void onRecoveryCompletedMessage() {
+        if(currentRecoveryBatchCount > 0) {
+            endCurrentLogRecoveryBatch();
+        }
+
+        onRecoveryComplete();
+
+        String recoveryTime = "";
+        if(recoveryTimer != null) {
+            recoveryTimer.stop();
+            recoveryTime = " in " + recoveryTimer.toString();
+            recoveryTimer = null;
+        }
+
+        LOG.info(
+            "Recovery completed" + recoveryTime + " - Switching actor to Follower - " +
+                "Persistence Id =  " + persistenceId() +
+                " Last index in log={}, snapshotIndex={}, snapshotTerm={}, " +
+                "journal-size={}",
+            replicatedLog.lastIndex(), replicatedLog.snapshotIndex,
+            replicatedLog.snapshotTerm, replicatedLog.size());
+
+        currentBehavior = switchBehavior(RaftState.Follower);
+        onStateChanged();
+    }
+
     @Override public void onReceiveCommand(Object message) {
         if (message instanceof ApplyState){
             ApplyState applyState = (ApplyState) message;
 
-            LOG.debug("Applying state for log index {} data {}",
-                applyState.getReplicatedLogEntry().getIndex(),
-                applyState.getReplicatedLogEntry().getData());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Applying state for log index {} data {}",
+                    applyState.getReplicatedLogEntry().getIndex(),
+                    applyState.getReplicatedLogEntry().getData());
+            }
 
             applyState(applyState.getClientActor(), applyState.getIdentifier(),
                 applyState.getReplicatedLogEntry().getData());
 
+        } else if (message instanceof ApplyLogEntries){
+            ApplyLogEntries ale = (ApplyLogEntries) message;
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Persisting ApplyLogEntries with index={}", ale.getToIndex());
+            }
+            persist(new ApplyLogEntries(ale.getToIndex()), new Procedure<ApplyLogEntries>() {
+                @Override
+                public void apply(ApplyLogEntries param) throws Exception {
+                }
+            });
+
         } else if(message instanceof ApplySnapshot ) {
             Snapshot snapshot = ((ApplySnapshot) message).getSnapshot();
 
-            LOG.debug("ApplySnapshot called on Follower Actor " +
-                "snapshotIndex:{}, snapshotTerm:{}", snapshot.getLastAppliedIndex(),
-                snapshot.getLastAppliedTerm());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("ApplySnapshot called on Follower Actor " +
+                        "snapshotIndex:{}, snapshotTerm:{}", snapshot.getLastAppliedIndex(),
+                    snapshot.getLastAppliedTerm()
+                );
+            }
             applySnapshot(ByteString.copyFrom(snapshot.getState()));
 
             //clears the followers log, sets the snapshot index to ensure adjusted-index works
@@ -253,7 +354,9 @@ public abstract class RaftActor extends UntypedPersistentActor {
         } else {
             if (!(message instanceof AppendEntriesMessages.AppendEntries)
                 && !(message instanceof AppendEntriesReply) && !(message instanceof SendHeartBeat)) {
-                LOG.debug("onReceiveCommand: message:" + message.getClass());
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("onReceiveCommand: message:" + message.getClass());
+                }
             }
 
             RaftState state =
@@ -294,7 +397,9 @@ public abstract class RaftActor extends UntypedPersistentActor {
             context.getReplicatedLog().lastIndex() + 1,
             context.getTermInformation().getCurrentTerm(), data);
 
-        LOG.debug("Persist data {}", replicatedLogEntry);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Persist data {}", replicatedLogEntry);
+        }
 
         replicatedLog
             .appendAndPersist(clientActor, identifier, replicatedLogEntry);
@@ -359,6 +464,10 @@ public abstract class RaftActor extends UntypedPersistentActor {
         return context.getLastApplied();
     }
 
+    protected RaftActorContext getRaftActorContext() {
+        return context;
+    }
+
     /**
      * setPeerAddress sets the address of a known peer at a later time.
      * <p>
@@ -399,6 +508,38 @@ public abstract class RaftActor extends UntypedPersistentActor {
     protected abstract void applyState(ActorRef clientActor, String identifier,
         Object data);
 
+    /**
+     * This method is called during recovery at the start of a batch of state entries. Derived
+     * classes should perform any initialization needed to start a batch.
+     */
+    protected abstract void startLogRecoveryBatch(int maxBatchSize);
+
+    /**
+     * This method is called during recovery to append state data to the current batch. It is
+     * called one or more times after {@link #startLogRecoveryBatch}.
+     *
+     * @param data the state data
+     */
+    protected abstract void appendRecoveredLogEntry(Payload data);
+
+    /**
+     * This method is called during recovery to reconstruct the state of the actor.
+     *
+     * @param snapshot A snapshot of the state of the actor
+     */
+    protected abstract void applyRecoverySnapshot(ByteString snapshot);
+
+    /**
+     * This method is called during recovery at the end of a batch to apply the current batched
+     * log entries. It is called after one or more calls to {@link #appendRecoveredLogEntry}.
+     */
+    protected abstract void applyCurrentLogRecoveryBatch();
+
+    /**
+     * This method is called when recovery is complete.
+     */
+    protected abstract void onRecoveryComplete();
+
     /**
      * This method will be called by the RaftActor when a snapshot needs to be
      * created. The derived actor should respond with its current state.
@@ -411,10 +552,7 @@ public abstract class RaftActor extends UntypedPersistentActor {
     protected abstract void createSnapshot();
 
     /**
-     * This method will be called by the RaftActor during recovery to
-     * reconstruct the state of the actor.
-     * <p/>
-     * This method may also be called at any other point during normal
+     * This method can be called at any point during normal
      * operations when the derived actor is out of sync with its peers
      * and the only way to bring it in sync is by applying a snapshot
      *
@@ -483,8 +621,10 @@ public abstract class RaftActor extends UntypedPersistentActor {
             return null;
         }
         String peerAddress = context.getPeerAddress(leaderId);
-        LOG.debug("getLeaderAddress leaderId = " + leaderId + " peerAddress = "
-            + peerAddress);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("getLeaderAddress leaderId = " + leaderId + " peerAddress = "
+                + peerAddress);
+        }
 
         return peerAddress;
     }
@@ -569,6 +709,7 @@ public abstract class RaftActor extends UntypedPersistentActor {
             // of a single command.
             persist(replicatedLogEntry,
                 new Procedure<ReplicatedLogEntry>() {
+                    @Override
                     public void apply(ReplicatedLogEntry evt) throws Exception {
                         // when a snaphsot is being taken, captureSnapshot != null
                         if (hasSnapshotCaptureInitiated == false &&
@@ -584,10 +725,13 @@ public abstract class RaftActor extends UntypedPersistentActor {
                                 lastAppliedTerm = lastAppliedEntry.getTerm();
                             }
 
-                            LOG.debug("Snapshot Capture logSize: {}", journal.size());
-                            LOG.debug("Snapshot Capture lastApplied:{} ", context.getLastApplied());
-                            LOG.debug("Snapshot Capture lastAppliedIndex:{}", lastAppliedIndex);
-                            LOG.debug("Snapshot Capture lastAppliedTerm:{}", lastAppliedTerm);
+                            if(LOG.isDebugEnabled()) {
+                                LOG.debug("Snapshot Capture logSize: {}", journal.size());
+                                LOG.debug("Snapshot Capture lastApplied:{} ",
+                                    context.getLastApplied());
+                                LOG.debug("Snapshot Capture lastAppliedIndex:{}", lastAppliedIndex);
+                                LOG.debug("Snapshot Capture lastAppliedTerm:{}", lastAppliedTerm);
+                            }
 
                             // send a CaptureSnapshot to self to make the expensive operation async.
                             getSelf().tell(new CaptureSnapshot(
@@ -630,17 +774,20 @@ public abstract class RaftActor extends UntypedPersistentActor {
         private long currentTerm = 0;
         private String votedFor = null;
 
+        @Override
         public long getCurrentTerm() {
             return currentTerm;
         }
 
+        @Override
         public String getVotedFor() {
             return votedFor;
         }
 
         @Override public void update(long currentTerm, String votedFor) {
-            LOG.debug("Set currentTerm={}, votedFor={}", currentTerm, votedFor);
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Set currentTerm={}, votedFor={}", currentTerm, votedFor);
+            }
             this.currentTerm = currentTerm;
             this.votedFor = votedFor;
         }
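
Taken together, recovery replays the persisted stream in order: an optional SnapshotOffer, then the ReplicatedLogEntry and ApplyLogEntries messages, and finally RecoveryCompleted. A worked trace of the journal that testRaftActorRecovery below constructs (the batch size exceeds the entry count, so the partial batch is flushed at the end):

    // SnapshotOffer(state=[A,B,C,D], lastApplied=3, lastIndex=4, unapplied=[E@4])
    //   -> replicatedLog rebuilt from the snapshot; lastApplied = commitIndex = 3
    // ReplicatedLogEntry(index=5, "F")  -> appended to the in-memory journal
    // ApplyLogEntries(toIndex=5)        -> entries 4..5 (E, F) batched and applied
    // ReplicatedLogEntry(index=6, "G")  -> appended only
    // ReplicatedLogEntry(index=7, "H")  -> appended only
    // RecoveryCompleted                 -> flush the partial batch, become Follower
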
index 25da37105cea18e46270a4baccfdae9b459a2500..e4aef0a8445d2400e54fff5c1d09c9044747134f 100644 (file)
@@ -59,26 +59,32 @@ public class RaftActorContextImpl implements RaftActorContext {
         this.LOG = logger;
     }
 
+    @Override
     public ActorRef actorOf(Props props){
         return context.actorOf(props);
     }
 
+    @Override
     public ActorSelection actorSelection(String path){
         return context.actorSelection(path);
     }
 
+    @Override
     public String getId() {
         return id;
     }
 
+    @Override
     public ActorRef getActor() {
         return actor;
     }
 
+    @Override
     public ElectionTerm getTermInformation() {
         return termInformation;
     }
 
+    @Override
     public long getCommitIndex() {
         return commitIndex;
     }
@@ -87,6 +93,7 @@ public class RaftActorContextImpl implements RaftActorContext {
         this.commitIndex = commitIndex;
     }
 
+    @Override
     public long getLastApplied() {
         return lastApplied;
     }
index c17f5448c6e256a97c4f7134959bb6c2d88a0971..85893333c2892eadf43e9aacc374f574c1b4a03a 100644 (file)
@@ -74,6 +74,13 @@ public interface ReplicatedLog {
      */
     void append(ReplicatedLogEntry replicatedLogEntry);
 
+    /**
+     * Optimization method to increase the capacity of the journal log prior to appending entries.
+     *
+     * @param amount the amount to increase by
+     */
+    void increaseJournalLogCapacity(int amount);
+
     /**
      *
      * @param replicatedLogEntry
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/ApplyLogEntries.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/base/messages/ApplyLogEntries.java
new file mode 100644 (file)
index 0000000..af3c4fd
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.base.messages;
+
+import java.io.Serializable;
+
+/**
+ * ApplyLogEntries is a marker message stored in Akka's persistent journal.
+ * During recovery, if this message is found, all in-memory journal entries from
+ * context.lastApplied to ApplyLogEntries.toIndex are applied to the state.
+ *
+ * This class is also used as an internal message sent from a behavior to the
+ * RaftActor to persist the ApplyLogEntries marker.
+ */
+public class ApplyLogEntries implements Serializable {
+    private final int toIndex;
+
+    public ApplyLogEntries(int toIndex) {
+        this.toIndex = toIndex;
+    }
+
+    public int getToIndex() {
+        return toIndex;
+    }
+}
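
Both halves of the contract appear in this patch: the behavior persists the marker fire-and-forget after applying entries, and RaftActor replays it during recovery:

    // Write side (AbstractRaftActorBehavior.applyLogToStateMachine, below):
    actor().tell(new ApplyLogEntries((int) context.getLastApplied()), actor());

    // Recovery side (RaftActor.onRecoveredApplyLogEntries, above):
    for (long i = context.getLastApplied() + 1; i <= ale.getToIndex(); i++) {
        batchRecoveredLogEntry(replicatedLog.get(i));
    }
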
index 86447509d7b28a4df5559dfac8d02a02c0baa2b8..b1560a5648b283e028ae0dc96e4267d92c6f438f 100644 (file)
@@ -15,6 +15,7 @@ import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.SerializationUtils;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
 import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
@@ -272,6 +273,17 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
         return null;
     }
 
+    /**
+     * Finds and removes the client request tracker for the specified log index, if one exists.
+     *
+     * @param logIndex the index of the log entry whose tracker should be removed
+     * @return the removed tracker, or null if none was found
+     */
+    protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
+        return null;
+    }
+
     /**
      * Find the log index from the previous to last entry in the log
      *
@@ -311,7 +323,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
              i < index + 1; i++) {
             ActorRef clientActor = null;
             String identifier = null;
-            ClientRequestTracker tracker = findClientRequestTracker(i);
+            ClientRequestTracker tracker = removeClientRequestTracker(i);
 
             if (tracker != null) {
                 clientActor = tracker.getClientActor();
@@ -336,6 +348,12 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
         }
         context.getLogger().debug("Setting last applied to {}", newLastApplied);
         context.setLastApplied(newLastApplied);
+
+        // Send a message to persist an ApplyLogEntries marker into Akka's persistent
+        // journal; it will be used during recovery. If the code above throws and this
+        // message is not sent, that is fine: a later AppendEntries will trigger
+        // applying entries again and the marker will then be added to the journal.
+        actor().tell(new ApplyLogEntries((int) context.getLastApplied()), actor());
     }
 
     protected Object fromSerializableMessage(Object serializable){
index 610fdc987fde7a1a51491ef25dc2f764011b9eda..1cfdf9dba8b912a5bc23f78b85c447621298d908 100644 (file)
@@ -9,6 +9,7 @@
 package org.opendaylight.controller.cluster.raft.behaviors;
 
 import akka.actor.ActorRef;
+import akka.event.LoggingAdapter;
 import com.google.protobuf.ByteString;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
@@ -38,9 +39,13 @@ import java.util.ArrayList;
 public class Follower extends AbstractRaftActorBehavior {
     private ByteString snapshotChunksCollected = ByteString.EMPTY;
 
+    private final LoggingAdapter LOG;
+
     public Follower(RaftActorContext context) {
         super(context);
 
+        LOG = context.getLogger();
+
         scheduleElection(electionDuration());
     }
 
@@ -48,8 +53,9 @@ public class Follower extends AbstractRaftActorBehavior {
         AppendEntries appendEntries) {
 
         if(appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) {
-            context.getLogger()
-                .debug(appendEntries.toString());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug(appendEntries.toString());
+            }
         }
 
         // TODO : Refactor this method into a bunch of smaller methods
@@ -79,9 +85,10 @@ public class Follower extends AbstractRaftActorBehavior {
             // an entry at prevLogIndex and this follower has no entries in
            // its log.
 
-            context.getLogger().debug(
-                "The followers log is empty and the senders prevLogIndex is {}",
-                appendEntries.getPrevLogIndex());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("The followers log is empty and the senders prevLogIndex is {}",
+                    appendEntries.getPrevLogIndex());
+            }
 
         } else if (lastIndex() > -1
             && appendEntries.getPrevLogIndex() != -1
@@ -90,9 +97,10 @@ public class Follower extends AbstractRaftActorBehavior {
             // The follower's log is out of sync because the Leader's
            // prevLogIndex entry was not found in its log
 
-            context.getLogger().debug(
-                "The log is not empty but the prevLogIndex {} was not found in it",
-                appendEntries.getPrevLogIndex());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("The log is not empty but the prevLogIndex {} was not found in it",
+                    appendEntries.getPrevLogIndex());
+            }
 
         } else if (lastIndex() > -1
             && previousEntry != null
@@ -102,10 +110,12 @@ public class Follower extends AbstractRaftActorBehavior {
             // prevLogIndex entry does exist in the follower's log but it has
             // a different term in it
 
-            context.getLogger().debug(
-                "Cannot append entries because previous entry term {}  is not equal to append entries prevLogTerm {}"
-                , previousEntry.getTerm()
-                , appendEntries.getPrevLogTerm());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug(
+                    "Cannot append entries because previous entry term {}  is not equal to append entries prevLogTerm {}"
+                    , previousEntry.getTerm()
+                    , appendEntries.getPrevLogTerm());
+            }
         } else {
             outOfSync = false;
         }
@@ -113,9 +123,12 @@ public class Follower extends AbstractRaftActorBehavior {
         if (outOfSync) {
             // We found that the log was out of sync so just send a negative
             // reply and return
-            context.getLogger().debug("Follower is out-of-sync, " +
-                "so sending negative reply, lastIndex():{}, lastTerm():{}",
-                lastIndex(), lastTerm());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Follower is out-of-sync, " +
+                        "so sending negative reply, lastIndex():{}, lastTerm():{}",
+                    lastIndex(), lastTerm()
+                );
+            }
             sender.tell(
                 new AppendEntriesReply(context.getId(), currentTerm(), false,
                     lastIndex(), lastTerm()), actor()
@@ -125,10 +138,12 @@ public class Follower extends AbstractRaftActorBehavior {
 
         if (appendEntries.getEntries() != null
             && appendEntries.getEntries().size() > 0) {
-            context.getLogger().debug(
-                "Number of entries to be appended = " + appendEntries
-                    .getEntries().size()
-            );
+            if(LOG.isDebugEnabled()) {
+                LOG.debug(
+                    "Number of entries to be appended = " + appendEntries
+                        .getEntries().size()
+                );
+            }
 
             // 3. If an existing entry conflicts with a new one (same index
             // but different terms), delete the existing entry and all that
@@ -151,10 +166,12 @@ public class Follower extends AbstractRaftActorBehavior {
                         continue;
                     }
 
-                    context.getLogger().debug(
-                        "Removing entries from log starting at "
-                            + matchEntry.getIndex()
-                    );
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug(
+                            "Removing entries from log starting at "
+                                + matchEntry.getIndex()
+                        );
+                    }
 
                     // Entries do not match so remove all subsequent entries
                     context.getReplicatedLog()
@@ -163,10 +180,12 @@ public class Follower extends AbstractRaftActorBehavior {
                 }
             }
 
-            context.getLogger().debug(
-                "After cleanup entries to be added from = " + (addEntriesFrom
-                    + lastIndex())
-            );
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("After cleanup entries to be added from = " + (addEntriesFrom
+                        + lastIndex()));
+            }
 
             // 4. Append any new entries not already in the log
             for (int i = addEntriesFrom;
@@ -181,8 +200,9 @@ public class Follower extends AbstractRaftActorBehavior {
                     .appendAndPersist(appendEntries.getEntries().get(i));
             }
 
-            context.getLogger().debug(
-                "Log size is now " + context.getReplicatedLog().size());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Log size is now " + context.getReplicatedLog().size());
+            }
         }
 
 
@@ -195,8 +215,9 @@ public class Follower extends AbstractRaftActorBehavior {
             context.getReplicatedLog().lastIndex()));
 
         if (prevCommitIndex != context.getCommitIndex()) {
-            context.getLogger()
-                .debug("Commit index set to " + context.getCommitIndex());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Commit index set to " + context.getCommitIndex());
+            }
         }
 
         // If commitIndex > lastApplied: increment lastApplied, apply
@@ -204,10 +225,14 @@ public class Follower extends AbstractRaftActorBehavior {
         // check if there are any entries to be applied. last-applied can be equal to last-index
         if (appendEntries.getLeaderCommit() > context.getLastApplied() &&
             context.getLastApplied() < lastIndex()) {
-            context.getLogger().debug("applyLogToStateMachine, " +
-                "appendEntries.getLeaderCommit():{}," +
-                "context.getLastApplied():{}, lastIndex():{}",
-                appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("applyLogToStateMachine, " +
+                        "appendEntries.getLeaderCommit():{}," +
+                        "context.getLastApplied():{}, lastIndex():{}",
+                    appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex()
+                );
+            }
+
             applyLogToStateMachine(appendEntries.getLeaderCommit());
         }
 
@@ -259,9 +284,13 @@ public class Follower extends AbstractRaftActorBehavior {
     }
 
     private void handleInstallSnapshot(ActorRef sender, InstallSnapshot installSnapshot) {
-        context.getLogger().debug("InstallSnapshot received by follower " +
-            "datasize:{} , Chunk:{}/{}", installSnapshot.getData().size(),
-            installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks());
+
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("InstallSnapshot received by follower " +
+                    "datasize:{} , Chunk:{}/{}", installSnapshot.getData().size(),
+                installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks()
+            );
+        }
 
         try {
             if (installSnapshot.getChunkIndex() == installSnapshot.getTotalChunks()) {
@@ -283,8 +312,11 @@ public class Follower extends AbstractRaftActorBehavior {
             } else {
                 // we have more to go
                 snapshotChunksCollected = snapshotChunksCollected.concat(installSnapshot.getData());
-                context.getLogger().debug("Chunk={},snapshotChunksCollected.size:{}",
-                    installSnapshot.getChunkIndex(), snapshotChunksCollected.size());
+
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Chunk={},snapshotChunksCollected.size:{}",
+                        installSnapshot.getChunkIndex(), snapshotChunksCollected.size());
+                }
             }
 
             sender.tell(new InstallSnapshotReply(
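
The isDebugEnabled guards added throughout this file (and the rest of the patch) pay off because the argument expressions are evaluated before debug() is ever invoked; the guard skips the string concatenation and toString() calls entirely when debug logging is off:

    // Unguarded: the concatenation runs even when debug logging is disabled.
    LOG.debug("Log size is now " + context.getReplicatedLog().size());

    // Guarded: the argument is built only when it will actually be logged.
    if(LOG.isDebugEnabled()) {
        LOG.debug("Log size is now " + context.getReplicatedLog().size());
    }
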
index 90948ffef7d8a5e1341bb8aede6b03ccf8dae344..ff8a2256d3cb05a7f29a99f6741aa4948935145b 100644 (file)
@@ -11,6 +11,7 @@ package org.opendaylight.controller.cluster.raft.behaviors;
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.Cancellable;
+import akka.event.LoggingAdapter;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
 import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
@@ -80,25 +81,27 @@ public class Leader extends AbstractRaftActorBehavior {
 
     private final int minReplicationCount;
 
+    private final LoggingAdapter LOG;
+
     public Leader(RaftActorContext context) {
         super(context);
 
-        if (lastIndex() >= 0) {
-            context.setCommitIndex(lastIndex());
-        }
+        LOG = context.getLogger();
 
         followers = context.getPeerAddresses().keySet();
 
         for (String followerId : followers) {
             FollowerLogInformation followerLogInformation =
                 new FollowerLogInformationImpl(followerId,
-                    new AtomicLong(lastIndex()),
+                    new AtomicLong(context.getCommitIndex()),
                     new AtomicLong(-1));
 
             followerToLog.put(followerId, followerLogInformation);
         }
 
-        context.getLogger().debug("Election:Leader has following peers:"+ followers);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Election:Leader has following peers:" + followers);
+        }
 
         if (followers.size() > 0) {
             minReplicationCount = (followers.size() + 1) / 2 + 1;
@@ -123,7 +126,9 @@ public class Leader extends AbstractRaftActorBehavior {
     @Override protected RaftState handleAppendEntries(ActorRef sender,
         AppendEntries appendEntries) {
 
-        context.getLogger().debug(appendEntries.toString());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug(appendEntries.toString());
+        }
 
         return state();
     }
@@ -132,8 +137,9 @@ public class Leader extends AbstractRaftActorBehavior {
         AppendEntriesReply appendEntriesReply) {
 
         if(! appendEntriesReply.isSuccess()) {
-            context.getLogger()
-                .debug(appendEntriesReply.toString());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug(appendEntriesReply.toString());
+            }
         }
 
         // Update the FollowerLogInformation
@@ -142,7 +148,7 @@ public class Leader extends AbstractRaftActorBehavior {
             followerToLog.get(followerId);
 
         if(followerLogInformation == null){
-            context.getLogger().error("Unknown follower {}", followerId);
+            LOG.error("Unknown follower {}", followerId);
             return state();
         }
 
@@ -196,6 +202,16 @@ public class Leader extends AbstractRaftActorBehavior {
         return state();
     }
 
+    @Override
+    protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
+        ClientRequestTracker toRemove = findClientRequestTracker(logIndex);
+        if(toRemove != null) {
+            trackerList.remove(toRemove);
+        }
+
+        return toRemove;
+    }
+
     protected ClientRequestTracker findClientRequestTracker(long logIndex) {
         for (ClientRequestTracker tracker : trackerList) {
             if (tracker.getIndex() == logIndex) {
@@ -260,10 +276,13 @@ public class Leader extends AbstractRaftActorBehavior {
             if (reply.isSuccess()) {
                 if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) {
                     //this was the last chunk reply
-                    context.getLogger().debug("InstallSnapshotReply received, " +
-                        "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
-                        reply.getChunkIndex(), followerId,
-                        context.getReplicatedLog().getSnapshotIndex() + 1);
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("InstallSnapshotReply received, " +
+                                "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
+                            reply.getChunkIndex(), followerId,
+                            context.getReplicatedLog().getSnapshotIndex() + 1
+                        );
+                    }
 
                     FollowerLogInformation followerLogInformation =
                         followerToLog.get(followerId);
@@ -272,31 +291,38 @@ public class Leader extends AbstractRaftActorBehavior {
                     followerLogInformation.setNextIndex(
                         context.getReplicatedLog().getSnapshotIndex() + 1);
                     mapFollowerToSnapshot.remove(followerId);
-                    context.getLogger().debug("followerToLog.get(followerId).getNextIndex().get()=" +
-                        followerToLog.get(followerId).getNextIndex().get());
+
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("followerToLog.get(followerId).getNextIndex().get()=" +
+                            followerToLog.get(followerId).getNextIndex().get());
+                    }
 
                 } else {
                     followerToSnapshot.markSendStatus(true);
                 }
             } else {
-                context.getLogger().info("InstallSnapshotReply received, " +
-                    "sending snapshot chunk failed, Will retry, Chunk:{}",
-                    reply.getChunkIndex());
+                LOG.info("InstallSnapshotReply received, " +
+                        "sending snapshot chunk failed, Will retry, Chunk:{}",
+                    reply.getChunkIndex()
+                );
                 followerToSnapshot.markSendStatus(false);
             }
 
         } else {
-            context.getLogger().error("ERROR!!" +
-                "FollowerId in InstallSnapshotReply not known to Leader" +
-                " or Chunk Index in InstallSnapshotReply not matching {} != {}",
-                followerToSnapshot.getChunkIndex(), reply.getChunkIndex() );
+            LOG.error("ERROR!!" +
+                    "FollowerId in InstallSnapshotReply not known to Leader" +
+                    " or Chunk Index in InstallSnapshotReply not matching {} != {}",
+                followerToSnapshot.getChunkIndex(), reply.getChunkIndex()
+            );
         }
     }
 
     private void replicate(Replicate replicate) {
         long logIndex = replicate.getReplicatedLogEntry().getIndex();
 
-        context.getLogger().debug("Replicate message " + logIndex);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Replicate message " + logIndex);
+        }
 
         // Create a tracker entry we will use this later to notify the
         // client actor
@@ -350,10 +376,13 @@ public class Leader extends AbstractRaftActorBehavior {
                         if (followerNextIndex >= 0 && leaderLastIndex >= followerNextIndex ) {
                             // if the follower is just not starting and leader's index
                             // is more than followers index
-                            context.getLogger().debug("SendInstallSnapshot to follower:{}," +
-                                "follower-nextIndex:{}, leader-snapshot-index:{},  " +
-                                "leader-last-index:{}", followerId,
-                                followerNextIndex, leaderSnapShotIndex, leaderLastIndex);
+                            if(LOG.isDebugEnabled()) {
+                                LOG.debug("SendInstallSnapshot to follower:{}," +
+                                        "follower-nextIndex:{}, leader-snapshot-index:{},  " +
+                                        "leader-last-index:{}", followerId,
+                                    followerNextIndex, leaderSnapShotIndex, leaderLastIndex
+                                );
+                            }
 
                             actor().tell(new SendInstallSnapshot(), actor());
                         } else {
@@ -412,11 +441,11 @@ public class Leader extends AbstractRaftActorBehavior {
                 ).toSerializable(),
                 actor()
             );
-            context.getLogger().info("InstallSnapshot sent to follower {}, Chunk: {}/{}",
+            LOG.info("InstallSnapshot sent to follower {}, Chunk: {}/{}",
                 followerActor.path(), mapFollowerToSnapshot.get(followerId).getChunkIndex(),
                 mapFollowerToSnapshot.get(followerId).getTotalChunks());
         } catch (IOException e) {
-            context.getLogger().error("InstallSnapshot failed for Leader.", e);
+            LOG.error("InstallSnapshot failed for Leader.", e);
         }
     }
 
@@ -431,7 +460,9 @@ public class Leader extends AbstractRaftActorBehavior {
             mapFollowerToSnapshot.put(followerId, followerToSnapshot);
         }
         ByteString nextChunk = followerToSnapshot.getNextChunk();
-        context.getLogger().debug("Leader's snapshot nextChunk size:{}", nextChunk.size());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Leader's snapshot nextChunk size:{}", nextChunk.size());
+        }
 
         return nextChunk;
     }
@@ -526,8 +557,10 @@ public class Leader extends AbstractRaftActorBehavior {
             int size = snapshotBytes.size();
             totalChunks = ( size / context.getConfigParams().getSnapshotChunkSize()) +
                 ((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0);
-            context.getLogger().debug("Snapshot {} bytes, total chunks to send:{}",
-                size, totalChunks);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Snapshot {} bytes, total chunks to send:{}",
+                    size, totalChunks);
+            }
         }
 
         public ByteString getSnapshotBytes() {
@@ -591,8 +624,10 @@ public class Leader extends AbstractRaftActorBehavior {
                 }
             }
 
-            context.getLogger().debug("length={}, offset={},size={}",
-                snapshotLength, start, size);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("length={}, offset={},size={}",
+                    snapshotLength, start, size);
+            }
             return getSnapshotBytes().substring(start, start + size);
 
         }
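
The totalChunks computation in FollowerToSnapshot is plain ceiling division; a worked example with the 2 MB default chunk size from DefaultConfigParamsImpl:

    // 5,000,000 / 2,048,000 = 2 full chunks with a 904,000-byte remainder,
    // so one extra chunk is needed: totalChunks = 3.
    int size = 5000000;
    int chunkSize = 2048 * 1000;
    int totalChunks = (size / chunkSize) + ((size % chunkSize) > 0 ? 1 : 0); // = 3
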
index ca34a34ca49337783ec7fda8a9b9966fba1f16ac..0d5f644b3db2e559dbd22a318c9f7dd067e7cb2f 100644 (file)
@@ -200,6 +200,10 @@ public class MockRaftActorContext implements RaftActorContext {
     public static class MockPayload extends Payload implements Serializable {
         private String value = "";
 
+        public MockPayload() {
+        }
+
         public MockPayload(String s) {
             this.value = s;
         }
@@ -251,4 +255,24 @@ public class MockRaftActorContext implements RaftActorContext {
             return index;
         }
     }
+
+    public static class MockReplicatedLogBuilder {
+        private ReplicatedLog mockLog = new SimpleReplicatedLog();
+
+        public MockReplicatedLogBuilder createEntries(int start, int end, int term) {
+            for (int i = start; i < end; i++) {
+                this.mockLog.append(new ReplicatedLogImplEntry(i, term,
+                        new MockRaftActorContext.MockPayload("foo" + i)));
+            }
+            return this;
+        }
+
+        public MockReplicatedLogBuilder addEntry(int index, int term, MockPayload payload) {
+            this.mockLog.append(new ReplicatedLogImplEntry(index, term, payload));
+            return this;
+        }
+
+        public ReplicatedLog build() {
+            return this.mockLog;
+        }
+    }
 }
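
A usage sketch for the new builder (indices, terms, and the payload value are illustrative):

    // Entries at indices 0..4 in term 1, plus one explicit entry at index 5, term 2.
    ReplicatedLog log = new MockRaftActorContext.MockReplicatedLogBuilder()
        .createEntries(0, 5, 1)
        .addEntry(5, 2, new MockRaftActorContext.MockPayload("x"))
        .build();
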
index 12123db12995061901a39a264c79f0237d78d00a..22f374319cfdbca12115fad320949c7b277a45a5 100644 (file)
@@ -2,42 +2,124 @@ package org.opendaylight.controller.cluster.raft;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
 import akka.actor.Props;
+import akka.actor.Terminated;
 import akka.event.Logging;
 import akka.japi.Creator;
 import akka.testkit.JavaTestKit;
+import akka.testkit.TestActorRef;
+import com.google.common.base.Optional;
 import com.google.protobuf.ByteString;
+import org.junit.After;
 import org.junit.Test;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
-
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
+import org.opendaylight.controller.cluster.raft.utils.MockAkkaJournal;
+import org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore;
+import scala.concurrent.duration.FiniteDuration;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
-
-import static junit.framework.TestCase.assertEquals;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import static org.junit.Assert.assertEquals;
 
 public class RaftActorTest extends AbstractActorTest {
 
 
+    @After
+    public void tearDown() {
+        MockAkkaJournal.clearJournal();
+        MockSnapshotStore.setMockSnapshot(null);
+    }
+
     public static class MockRaftActor extends RaftActor {
 
-        public MockRaftActor(String id,
-            Map<String, String> peerAddresses) {
-            super(id, peerAddresses);
+        public static final class MockRaftActorCreator implements Creator<MockRaftActor> {
+            private final Map<String, String> peerAddresses;
+            private final String id;
+            private final Optional<ConfigParams> config;
+
+            private MockRaftActorCreator(Map<String, String> peerAddresses, String id,
+                    Optional<ConfigParams> config) {
+                this.peerAddresses = peerAddresses;
+                this.id = id;
+                this.config = config;
+            }
+
+            @Override
+            public MockRaftActor create() throws Exception {
+                return new MockRaftActor(id, peerAddresses, config);
+            }
         }
 
-        public static Props props(final String id, final Map<String, String> peerAddresses){
-            return Props.create(new Creator<MockRaftActor>(){
+        private final CountDownLatch recoveryComplete = new CountDownLatch(1);
+        private final List<Object> state;
 
-                @Override public MockRaftActor create() throws Exception {
-                    return new MockRaftActor(id, peerAddresses);
-                }
-            });
+        public MockRaftActor(String id, Map<String, String> peerAddresses, Optional<ConfigParams> config) {
+            super(id, peerAddresses, config);
+            state = new ArrayList<>();
+        }
+
+        public void waitForRecoveryComplete() {
+            try {
+                assertEquals("Recovery complete", true, recoveryComplete.await(5,  TimeUnit.SECONDS));
+            } catch (InterruptedException e) {
+                e.printStackTrace();
+            }
+        }
+
+        public List<Object> getState() {
+            return state;
+        }
+
+        public static Props props(final String id, final Map<String, String> peerAddresses,
+                Optional<ConfigParams> config){
+            return Props.create(new MockRaftActorCreator(peerAddresses, id, config));
+        }
+
+        @Override protected void applyState(ActorRef clientActor, String identifier, Object data) {
+        }
+
+        @Override
+        protected void startLogRecoveryBatch(int maxBatchSize) {
+        }
+
+        @Override
+        protected void appendRecoveredLogEntry(Payload data) {
+            state.add(data);
+        }
+
+        @Override
+        protected void applyCurrentLogRecoveryBatch() {
+        }
+
+        @Override
+        protected void onRecoveryComplete() {
+            recoveryComplete.countDown();
         }
 
-        @Override protected void applyState(ActorRef clientActor,
-            String identifier,
-            Object data) {
+        @Override
+        protected void applyRecoverySnapshot(ByteString snapshot) {
+            try {
+                Object data = toObject(snapshot);
+                System.out.println("!!!!!applyRecoverySnapshot: "+data);
+                if (data instanceof List) {
+                    state.addAll((List) data);
+                }
+            } catch (Exception e) {
+                e.printStackTrace();
+            }
         }
 
         @Override protected void createSnapshot() {
@@ -45,7 +127,6 @@ public class RaftActorTest extends AbstractActorTest {
         }
 
         @Override protected void applySnapshot(ByteString snapshot) {
-            throw new UnsupportedOperationException("applySnapshot");
         }
 
         @Override protected void onStateChanged() {
@@ -55,6 +136,26 @@ public class RaftActorTest extends AbstractActorTest {
             return this.getId();
         }
 
+        private Object toObject(ByteString bs) throws ClassNotFoundException, IOException {
+            Object obj = null;
+            ByteArrayInputStream bis = null;
+            ObjectInputStream ois = null;
+            try {
+                bis = new ByteArrayInputStream(bs.toByteArray());
+                ois = new ObjectInputStream(bis);
+                obj = ois.readObject();
+            } finally {
+                if (bis != null) {
+                    bis.close();
+                }
+                if (ois != null) {
+                    ois.close();
+                }
+            }
+            return obj;
+        }
+
     }
 
 
@@ -64,9 +165,8 @@ public class RaftActorTest extends AbstractActorTest {
         public RaftActorTestKit(ActorSystem actorSystem, String actorName) {
             super(actorSystem);
 
-            raftActor = this.getSystem()
-                .actorOf(MockRaftActor.props(actorName,
-                    Collections.EMPTY_MAP), actorName);
+            raftActor = this.getSystem().actorOf(MockRaftActor.props(actorName,
+                    Collections.EMPTY_MAP, Optional.<ConfigParams>absent()), actorName);
 
         }
 
@@ -76,6 +176,7 @@ public class RaftActorTest extends AbstractActorTest {
             return
                 new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
                 ) {
+                    @Override
                     protected Boolean run() {
                         return true;
                     }
@@ -87,37 +188,15 @@ public class RaftActorTest extends AbstractActorTest {
         }
 
         public void findLeader(final String expectedLeader){
+            raftActor.tell(new FindLeader(), getRef());
 
-
-            new Within(duration("1 seconds")) {
-                protected void run() {
-
-                    raftActor.tell(new FindLeader(), getRef());
-
-                    String s = new ExpectMsg<String>(duration("1 seconds"),
-                        "findLeader") {
-                        // do not put code outside this method, will run afterwards
-                        protected String match(Object in) {
-                            if (in instanceof FindLeaderReply) {
-                                return ((FindLeaderReply) in).getLeaderActor();
-                            } else {
-                                throw noMatch();
-                            }
-                        }
-                    }.get();// this extracts the received message
-
-                    assertEquals(expectedLeader, s);
-
-                }
-
-
-            };
+            FindLeaderReply reply = expectMsgClass(duration("5 seconds"), FindLeaderReply.class);
+            assertEquals("getLeaderActor", expectedLeader, reply.getLeaderActor());
         }
 
         public ActorRef getRaftActor() {
             return raftActor;
         }
-
     }
 
 
@@ -134,5 +213,104 @@ public class RaftActorTest extends AbstractActorTest {
         kit.findLeader(kit.getRaftActor().path().toString());
     }
 
+    @Test
+    public void testRaftActorRecovery() throws Exception {
+        new JavaTestKit(getSystem()) {{
+            String persistenceId = "follower10";
+
+            DefaultConfigParamsImpl config = new DefaultConfigParamsImpl();
+            // Set the heartbeat interval high to essentially disable election;
+            // otherwise the test may fail if the actor is switched to Leader
+            // and the commitIndex is set to the last log entry.
+            config.setHeartBeatInterval(new FiniteDuration(1, TimeUnit.DAYS));
+
+            ActorRef followerActor = getSystem().actorOf(MockRaftActor.props(persistenceId,
+                    Collections.EMPTY_MAP, Optional.<ConfigParams>of(config)), persistenceId);
+
+            watch(followerActor);
+
+            List<ReplicatedLogEntry> snapshotUnappliedEntries = new ArrayList<>();
+            ReplicatedLogEntry entry1 = new MockRaftActorContext.MockReplicatedLogEntry(1, 4,
+                    new MockRaftActorContext.MockPayload("E"));
+            snapshotUnappliedEntries.add(entry1);
+
+            int lastAppliedDuringSnapshotCapture = 3;
+            int lastIndexDuringSnapshotCapture = 4;
+
+            // 4 messages as part of snapshot, which are applied to state
+            ByteString snapshotBytes = fromObject(Arrays.asList(
+                        new MockRaftActorContext.MockPayload("A"),
+                        new MockRaftActorContext.MockPayload("B"),
+                        new MockRaftActorContext.MockPayload("C"),
+                        new MockRaftActorContext.MockPayload("D")));
+
+            Snapshot snapshot = Snapshot.create(snapshotBytes.toByteArray(),
+                    snapshotUnappliedEntries, lastIndexDuringSnapshotCapture, 1,
+                    lastAppliedDuringSnapshotCapture, 1);
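+            // Prime the mock persistence plugins so the restarted actor
+            // recovers this snapshot plus the journal entries added below.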
+            MockSnapshotStore.setMockSnapshot(snapshot);
+            MockSnapshotStore.setPersistenceId(persistenceId);
+
+            // add more entries after snapshot is taken
+            List<ReplicatedLogEntry> entries = new ArrayList<>();
+            ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 5,
+                    new MockRaftActorContext.MockPayload("F"));
+            ReplicatedLogEntry entry3 = new MockRaftActorContext.MockReplicatedLogEntry(1, 6,
+                    new MockRaftActorContext.MockPayload("G"));
+            ReplicatedLogEntry entry4 = new MockRaftActorContext.MockReplicatedLogEntry(1, 7,
+                    new MockRaftActorContext.MockPayload("H"));
+            entries.add(entry2);
+            entries.add(entry3);
+            entries.add(entry4);
+
+            int lastAppliedToState = 5;
+            int lastIndex = 7;
+
+            MockAkkaJournal.addToJournal(5, entry2);
+            // 2 entries are applied to state besides the 4 entries in snapshot
+            MockAkkaJournal.addToJournal(6, new ApplyLogEntries(lastAppliedToState));
+            MockAkkaJournal.addToJournal(7, entry3);
+            MockAkkaJournal.addToJournal(8, entry4);
+
+            // kill the actor
+            followerActor.tell(PoisonPill.getInstance(), null);
+            expectMsgClass(duration("5 seconds"), Terminated.class);
+
+            unwatch(followerActor);
+
+            //reinstate the actor
+            TestActorRef<MockRaftActor> ref = TestActorRef.create(getSystem(),
+                    MockRaftActor.props(persistenceId, Collections.EMPTY_MAP,
+                            Optional.<ConfigParams>of(config)));
+
+            ref.underlyingActor().waitForRecoveryComplete();
+
+            RaftActorContext context = ref.underlyingActor().getRaftActorContext();
+            assertEquals("Journal log size", snapshotUnappliedEntries.size() + entries.size(),
+                    context.getReplicatedLog().size());
+            assertEquals("Last index", lastIndex, context.getReplicatedLog().lastIndex());
+            assertEquals("Last applied", lastAppliedToState, context.getLastApplied());
+            assertEquals("Commit index", lastAppliedToState, context.getCommitIndex());
+            assertEquals("Recovered state size", 6, ref.underlyingActor().getState().size());
+        }};
+    }
 
+    private ByteString fromObject(Object snapshot) throws Exception {
+        ByteArrayOutputStream b = null;
+        ObjectOutputStream o = null;
+        try {
+            b = new ByteArrayOutputStream();
+            o = new ObjectOutputStream(b);
+            o.writeObject(snapshot);
+            byte[] snapshotBytes = b.toByteArray();
+            return ByteString.copyFrom(snapshotBytes);
+        } finally {
+            if (o != null) {
+                o.flush();
+                o.close();
+            }
+            if (b != null) {
+                b.close();
+            }
+        }
+    }
 }
index 227d1effa7e9b8b3bb65932fe8c25b1a2eecdbf5..a72a7c43324922748cad1837df047000cb1a4013 100644 (file)
@@ -3,6 +3,7 @@ package org.opendaylight.controller.cluster.raft.behaviors;
 import akka.actor.ActorRef;
 import akka.actor.Props;
 import akka.testkit.JavaTestKit;
+import com.google.protobuf.ByteString;
 import junit.framework.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
@@ -10,19 +11,29 @@ import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
 import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
 import org.opendaylight.controller.cluster.raft.messages.RequestVote;
 import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
 import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
 
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 public class FollowerTest extends AbstractRaftActorBehaviorTest {
 
@@ -34,8 +45,12 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest {
         return new Follower(actorContext);
     }
 
-    @Override protected RaftActorContext createActorContext() {
-        return new MockRaftActorContext("test", getSystem(), followerActor);
+    @Override protected RaftActorContext createActorContext() {
+        return createActorContext(followerActor);
+    }
+
+    protected RaftActorContext createActorContext(ActorRef actorRef) {
+        return new MockRaftActorContext("test", getSystem(), actorRef);
     }
 
     @Test
@@ -158,13 +173,14 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest {
                 createActorContext();
 
             context.setLastApplied(100);
-            setLastLogEntry((MockRaftActorContext) context, 1, 100, new MockRaftActorContext.MockPayload(""));
+            setLastLogEntry((MockRaftActorContext) context, 1, 100,
+                new MockRaftActorContext.MockPayload(""));
             ((MockRaftActorContext) context).getReplicatedLog().setSnapshotIndex(99);
 
             List<ReplicatedLogEntry> entries =
                 Arrays.asList(
-                    (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(2, 101,
-                        new MockRaftActorContext.MockPayload("foo"))
+                        (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(2, 101,
+                                new MockRaftActorContext.MockPayload("foo"))
                 );
 
             // The new commitIndex is 101
@@ -409,4 +425,140 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest {
         }};
     }
 
+
+    /**
+     * Verifies that an InstallSnapshot received by the follower is applied
+     * correctly, chunk by chunk.
+     */
+    @Test
+    public void testHandleInstallSnapshot() throws Exception {
+        JavaTestKit javaTestKit = new JavaTestKit(getSystem()) {{
+
+            ActorRef leaderActor = getSystem().actorOf(Props.create(
+                MessageCollectorActor.class));
+
+            MockRaftActorContext context = (MockRaftActorContext)
+                createActorContext(getRef());
+
+            Follower follower = (Follower)createBehavior(context);
+
+            HashMap<String, String> followerSnapshot = new HashMap<>();
+            followerSnapshot.put("1", "A");
+            followerSnapshot.put("2", "B");
+            followerSnapshot.put("3", "C");
+
+            ByteString bsSnapshot = toByteString(followerSnapshot);
+            ByteString chunkData = ByteString.EMPTY;
+            int offset = 0;
+            int snapshotLength = bsSnapshot.size();
+            int i = 1;
+
+            do {
+                chunkData = getNextChunk(bsSnapshot, offset);
+                final InstallSnapshot installSnapshot =
+                    new InstallSnapshot(1, "leader-1", i, 1,
+                        chunkData, i, 3);
+                follower.handleMessage(leaderActor, installSnapshot);
+                offset = offset + 50;
+                i++;
+            } while ((offset+50) < snapshotLength);
+
+            final InstallSnapshot installSnapshot3 = new InstallSnapshot(1, "leader-1", 3, 1, chunkData, 3, 3);
+            follower.handleMessage(leaderActor, installSnapshot3);
+
+            String[] matches = new ReceiveWhile<String>(String.class, duration("2 seconds")) {
+                @Override
+                protected String match(Object o) throws Exception {
+                    if (o instanceof ApplySnapshot) {
+                        ApplySnapshot as = (ApplySnapshot)o;
+                        if (as.getSnapshot().getLastIndex() != installSnapshot3.getLastIncludedIndex()) {
+                            return "applySnapshot-lastIndex-mismatch";
+                        }
+                        if (as.getSnapshot().getLastAppliedTerm() != installSnapshot3.getLastIncludedTerm()) {
+                            return "applySnapshot-lastAppliedTerm-mismatch";
+                        }
+                        if (as.getSnapshot().getLastAppliedIndex() != installSnapshot3.getLastIncludedIndex()) {
+                            return "applySnapshot-lastAppliedIndex-mismatch";
+                        }
+                        if (as.getSnapshot().getLastTerm() != installSnapshot3.getLastIncludedTerm()) {
+                            return "applySnapshot-lastTerm-mismatch";
+                        }
+                        return "applySnapshot";
+                    }
+
+                    return "ignoreCase";
+                }
+            }.get();
+
+            String applySnapshotMatch = "";
+            for (String reply: matches) {
+                if (reply.startsWith("applySnapshot")) {
+                    applySnapshotMatch = reply;
+                }
+            }
+
+            assertEquals("applySnapshot", applySnapshotMatch);
+
+            Object messages = executeLocalOperation(leaderActor, "get-all-messages");
+
+            assertNotNull(messages);
+            assertTrue(messages instanceof List);
+            List<Object> listMessages = (List<Object>) messages;
+
+            int installSnapshotReplyReceivedCount = 0;
+            for (Object message: listMessages) {
+                if (message instanceof InstallSnapshotReply) {
+                    ++installSnapshotReplyReceivedCount;
+                }
+            }
+
+            assertEquals(3, installSnapshotReplyReceivedCount);
+
+        }};
+    }
+
+    public Object executeLocalOperation(ActorRef actor, Object message) throws Exception {
+        return MessageCollectorActor.getAllMessages(actor);
+    }
+
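+    // Returns the next slice of the snapshot, at most 50 bytes, starting at offset.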
+    public ByteString getNextChunk(ByteString bs, int offset) {
+        int snapshotLength = bs.size();
+        int start = offset;
+        int size = 50;
+        if (50 > snapshotLength) {
+            size = snapshotLength;
+        } else if ((start + 50) > snapshotLength) {
+            size = snapshotLength - start;
+        }
+        return bs.substring(start, start + size);
+    }
+
+    private ByteString toByteString(Map<String, String> state) {
+        ByteArrayOutputStream b = null;
+        ObjectOutputStream o = null;
+        try {
+            try {
+                b = new ByteArrayOutputStream();
+                o = new ObjectOutputStream(b);
+                o.writeObject(state);
+                byte[] snapshotBytes = b.toByteArray();
+                return ByteString.copyFrom(snapshotBytes);
+            } finally {
+                if (o != null) {
+                    o.flush();
+                    o.close();
+                }
+                if (b != null) {
+                    b.close();
+                }
+            }
+        } catch (IOException e) {
+            org.junit.Assert.fail("IOException in converting Hashmap to Bytestring:" + e);
+        }
+        return null;
+    }
 }
index c4ef51d968422533f9df668bb23fd56563dc2ad2..19af64790ff7896f496f6585aa27939d76fa4f65 100644 (file)
@@ -20,9 +20,12 @@ import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
 import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
 import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
 import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
 import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
 
 import java.io.ByteArrayOutputStream;
@@ -171,18 +174,13 @@ public class LeaderTest extends AbstractRaftActorBehaviorTest {
 
                     actorContext.getReplicatedLog().removeFrom(0);
 
-                    actorContext.getReplicatedLog().append(new ReplicatedLogImplEntry(0, 1,
-                        new MockRaftActorContext.MockPayload("foo")));
-
-                    ReplicatedLogImplEntry entry =
-                        new ReplicatedLogImplEntry(1, 1,
-                            new MockRaftActorContext.MockPayload("foo"));
-
-                    actorContext.getReplicatedLog().append(entry);
+                    actorContext.setReplicatedLog(
+                        new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 2, 1)
+                            .build());
 
                     Leader leader = new Leader(actorContext);
                     RaftState raftState = leader
-                        .handleMessage(senderActor, new Replicate(null, "state-id",entry));
+                        .handleMessage(senderActor, new Replicate(null, "state-id",actorContext.getReplicatedLog().get(1)));
 
                     // State should not change
                     assertEquals(RaftState.Leader, raftState);
@@ -335,7 +333,6 @@ public class LeaderTest extends AbstractRaftActorBehaviorTest {
                         new ReplicatedLogImplEntry(newEntryIndex, currentTerm,
                             new MockRaftActorContext.MockPayload("D"));
 
-
                     RaftState raftState = leader.handleMessage(senderActor, new SendInstallSnapshot());
 
                     assertEquals(RaftState.Leader, raftState);
@@ -526,6 +523,157 @@ public class LeaderTest extends AbstractRaftActorBehaviorTest {
         return null;
     }
 
+    public static class ForwardMessageToBehaviorActor extends MessageCollectorActor {
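+        // Collects messages like its parent and also forwards each one to the
+        // configured behavior so a real Follower can react to leader traffic.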
+        private static AbstractRaftActorBehavior behavior;
+
+        public ForwardMessageToBehaviorActor(){
+
+        }
+
+        @Override public void onReceive(Object message) throws Exception {
+            super.onReceive(message);
+            behavior.handleMessage(sender(), message);
+        }
+
+        public static void setBehavior(AbstractRaftActorBehavior behavior){
+            ForwardMessageToBehaviorActor.behavior = behavior;
+        }
+    }
+
+    @Test
+    public void testLeaderCreatedWithCommitIndexLessThanLastIndex() throws Exception {
+        new JavaTestKit(getSystem()) {{
+
+            ActorRef leaderActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+            MockRaftActorContext leaderActorContext =
+                new MockRaftActorContext("leader", getSystem(), leaderActor);
+
+            ActorRef followerActor = getSystem().actorOf(Props.create(ForwardMessageToBehaviorActor.class));
+
+            MockRaftActorContext followerActorContext =
+                new MockRaftActorContext("follower", getSystem(), followerActor);
+
+            Follower follower = new Follower(followerActorContext);
+
+            ForwardMessageToBehaviorActor.setBehavior(follower);
+
+            Map<String, String> peerAddresses = new HashMap<>();
+            peerAddresses.put(followerActor.path().toString(),
+                followerActor.path().toString());
+
+            leaderActorContext.setPeerAddresses(peerAddresses);
+
+            leaderActorContext.getReplicatedLog().removeFrom(0);
+
+            //create 3 entries
+            leaderActorContext.setReplicatedLog(
+                new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+            leaderActorContext.setCommitIndex(1);
+
+            followerActorContext.getReplicatedLog().removeFrom(0);
+
+            // follower too has the exact same log entries and has the same commit index
+            followerActorContext.setReplicatedLog(
+                new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+            followerActorContext.setCommitIndex(1);
+
+            Leader leader = new Leader(leaderActorContext);
+
+            leader.handleMessage(leaderActor, new SendHeartBeat());
+
+            AppendEntriesMessages.AppendEntries appendEntries =
+                (AppendEntriesMessages.AppendEntries) MessageCollectorActor
+                    .getFirstMatching(followerActor, AppendEntriesMessages.AppendEntries.class);
+
+            assertNotNull(appendEntries);
+
+            assertEquals(1, appendEntries.getLeaderCommit());
+            assertEquals(1, appendEntries.getLogEntries(0).getIndex());
+            assertEquals(0, appendEntries.getPrevLogIndex());
+
+            AppendEntriesReply appendEntriesReply =
+                (AppendEntriesReply) MessageCollectorActor.getFirstMatching(
+                    leaderActor, AppendEntriesReply.class);
+
+            assertNotNull(appendEntriesReply);
+
+            // follower returns its next index
+            assertEquals(2, appendEntriesReply.getLogLastIndex());
+            assertEquals(1, appendEntriesReply.getLogLastTerm());
+
+        }};
+    }
+
+
+    @Test
+    public void testLeaderCreatedWithCommitIndexLessThanFollowersCommitIndex() throws Exception {
+        new JavaTestKit(getSystem()) {{
+
+            ActorRef leaderActor = getSystem().actorOf(Props.create(MessageCollectorActor.class));
+
+            MockRaftActorContext leaderActorContext =
+                new MockRaftActorContext("leader", getSystem(), leaderActor);
+
+            ActorRef followerActor = getSystem().actorOf(
+                Props.create(ForwardMessageToBehaviorActor.class));
+
+            MockRaftActorContext followerActorContext =
+                new MockRaftActorContext("follower", getSystem(), followerActor);
+
+            Follower follower = new Follower(followerActorContext);
+
+            ForwardMessageToBehaviorActor.setBehavior(follower);
+
+            Map<String, String> peerAddresses = new HashMap<>();
+            peerAddresses.put(followerActor.path().toString(),
+                followerActor.path().toString());
+
+            leaderActorContext.setPeerAddresses(peerAddresses);
+
+            leaderActorContext.getReplicatedLog().removeFrom(0);
+
+            leaderActorContext.setReplicatedLog(
+                new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+            leaderActorContext.setCommitIndex(1);
+
+            followerActorContext.getReplicatedLog().removeFrom(0);
+
+            followerActorContext.setReplicatedLog(
+                new MockRaftActorContext.MockReplicatedLogBuilder().createEntries(0, 3, 1).build());
+
+            // follower has the same log entries but its commit index > leader's commit index
+            followerActorContext.setCommitIndex(2);
+
+            Leader leader = new Leader(leaderActorContext);
+
+            leader.handleMessage(leaderActor, new SendHeartBeat());
+
+            AppendEntriesMessages.AppendEntries appendEntries =
+                (AppendEntriesMessages.AppendEntries) MessageCollectorActor
+                    .getFirstMatching(followerActor, AppendEntriesMessages.AppendEntries.class);
+
+            assertNotNull(appendEntries);
+
+            assertEquals(1, appendEntries.getLeaderCommit());
+            assertEquals(1, appendEntries.getLogEntries(0).getIndex());
+            assertEquals(0, appendEntries.getPrevLogIndex());
+
+            AppendEntriesReply appendEntriesReply =
+                (AppendEntriesReply) MessageCollectorActor.getFirstMatching(
+                    leaderActor, AppendEntriesReply.class);
+
+            assertNotNull(appendEntriesReply);
+
+            assertEquals(2, appendEntriesReply.getLogLastIndex());
+            assertEquals(1, appendEntriesReply.getLogLastTerm());
+
+        }};
+    }
+
     private static class LeaderTestKit extends JavaTestKit {
 
         private LeaderTestKit(ActorSystem actorSystem) {
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MessageCollectorActor.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MessageCollectorActor.java
new file mode 100644 (file)
index 0000000..5892845
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.actor.ActorRef;
+import akka.actor.UntypedActor;
+import akka.pattern.Patterns;
+import akka.util.Timeout;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+
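+/**
+ * Test actor that records every message it receives and replies with the
+ * full list when asked with the "get-all-messages" query.
+ */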
+public class MessageCollectorActor extends UntypedActor {
+    private List<Object> messages = new ArrayList<>();
+
+    @Override public void onReceive(Object message) throws Exception {
+        if(message instanceof String){
+            if("get-all-messages".equals(message)){
+                getSender().tell(messages, getSelf());
+            }
+        } else {
+            messages.add(message);
+        }
+    }
+
+    public static List<Object> getAllMessages(ActorRef actor) throws Exception {
+        FiniteDuration operationDuration = Duration.create(5, TimeUnit.SECONDS);
+        Timeout operationTimeout = new Timeout(operationDuration);
+        Future<Object> future = Patterns.ask(actor, "get-all-messages", operationTimeout);
+
+        return (List<Object>) Await.result(future, operationDuration);
+    }
+
+    /**
+     * Gets the first collected message whose class equals the specified class.
+     *
+     * @param actor the MessageCollectorActor to query
+     * @param clazz the message class to match
+     * @return the first matching message, or null if none was received
+     */
+    public static Object getFirstMatching(ActorRef actor, Class<?> clazz) throws Exception {
+        List<Object> allMessages = getAllMessages(actor);
+
+        for(Object message : allMessages){
+            if(message.getClass().equals(clazz)){
+                return message;
+            }
+        }
+
+        return null;
+    }
+
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MockAkkaJournal.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MockAkkaJournal.java
new file mode 100644 (file)
index 0000000..85edc07
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.dispatch.Futures;
+import akka.japi.Procedure;
+import akka.persistence.PersistentConfirmation;
+import akka.persistence.PersistentId;
+import akka.persistence.PersistentImpl;
+import akka.persistence.PersistentRepr;
+import akka.persistence.journal.japi.AsyncWriteJournal;
+import com.google.common.collect.Maps;
+import scala.concurrent.Future;
+
+import java.util.Map;
+import java.util.concurrent.Callable;
+
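+/**
+ * An in-memory AsyncWriteJournal backed by a static map, letting tests seed
+ * the entries that a restarted actor replays during recovery.
+ */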
+public class MockAkkaJournal extends AsyncWriteJournal {
+
+    private static Map<Long, Object> journal = Maps.newHashMap();
+
+    public static void addToJournal(long sequenceNr, Object message) {
+        journal.put(sequenceNr, message);
+    }
+
+    public static void clearJournal() {
+        journal.clear();
+    }
+
+    @Override
+    public Future<Void> doAsyncReplayMessages(final String persistenceId, long fromSequenceNr,
+        long toSequenceNr, long max, final Procedure<PersistentRepr> replayCallback) {
+
+        return Futures.future(new Callable<Void>() {
+            @Override
+            public Void call() throws Exception {
+                for (Map.Entry<Long,Object> entry : journal.entrySet()) {
+                    PersistentRepr persistentMessage =
+                        new PersistentImpl(entry.getValue(), entry.getKey(), persistenceId, false, null, null);
+                    replayCallback.apply(persistentMessage);
+                }
+                return null;
+            }
+        }, context().dispatcher());
+    }
+
+    @Override
+    public Future<Long> doAsyncReadHighestSequenceNr(String s, long l) {
+        return Futures.successful(Long.valueOf(0));
+    }
+
+    @Override
+    public Future<Void> doAsyncWriteMessages(Iterable<PersistentRepr> persistentReprs) {
+        return Futures.successful(null);
+    }
+
+    @Override
+    public Future<Void> doAsyncWriteConfirmations(Iterable<PersistentConfirmation> persistentConfirmations) {
+        return Futures.successful(null);
+    }
+
+    @Override
+    public Future<Void> doAsyncDeleteMessages(Iterable<PersistentId> persistentIds, boolean b) {
+        return Futures.successful(null);
+    }
+
+    @Override
+    public Future<Void> doAsyncDeleteMessagesTo(String s, long l, boolean b) {
+        return Futures.successful(null);
+    }
+}
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MockSnapshotStore.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MockSnapshotStore.java
new file mode 100644 (file)
index 0000000..d70bf92
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.dispatch.Futures;
+import akka.japi.Option;
+import akka.persistence.SelectedSnapshot;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.snapshot.japi.SnapshotStore;
+import org.opendaylight.controller.cluster.raft.Snapshot;
+import scala.concurrent.Future;
+
+
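+/**
+ * A SnapshotStore that returns a statically configured Snapshot, letting
+ * tests control exactly what a recovering actor loads.
+ */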
+public class MockSnapshotStore extends SnapshotStore {
+
+    private static Snapshot mockSnapshot;
+    private static String persistenceId;
+
+    public static void setMockSnapshot(Snapshot s) {
+        mockSnapshot = s;
+    }
+
+    public static void setPersistenceId(String pId) {
+        persistenceId = pId;
+    }
+
+    @Override
+    public Future<Option<SelectedSnapshot>> doLoadAsync(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) {
+        if (mockSnapshot == null) {
+            return Futures.successful(Option.<SelectedSnapshot>none());
+        }
+
+        SnapshotMetadata smd = new SnapshotMetadata(persistenceId, 1, 12345);
+        SelectedSnapshot selectedSnapshot =
+            new SelectedSnapshot(smd, mockSnapshot);
+        return Futures.successful(Option.some(selectedSnapshot));
+    }
+
+    @Override
+    public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
+        return Futures.successful(null);
+    }
+
+    @Override
+    public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
+
+    }
+
+    @Override
+    public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
+
+    }
+
+    @Override
+    public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) throws Exception {
+
+    }
+}
index 2b753004c48265620628fdbc58a1e41a96a65c51..2f53d4a4ebef7121cf20f2241e97929b6e324964 100644 (file)
@@ -1,4 +1,7 @@
 akka {
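+    # Route akka-persistence to the in-memory test plugins defined below.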
+    persistence.snapshot-store.plugin = "mock-snapshot-store"
+    persistence.journal.plugin = "mock-journal"
+
     loglevel = "DEBUG"
     loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
 
@@ -19,3 +22,17 @@ akka {
         }
     }
 }
+
+mock-snapshot-store {
+  # Class name of the plugin.
+  class = "org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore"
+  # Dispatcher for the plugin actor.
+  plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
+
+mock-journal {
+  # Class name of the plugin.
+  class = "org.opendaylight.controller.cluster.raft.utils.MockAkkaJournal"
+  # Dispatcher for the plugin actor.
+  plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
index 73c81ca3a343b21cc4b879163e9b69fdeb385693..36b7a0f5f2754cd54be26bd932c99e6e361a9631 100644 (file)
@@ -34,15 +34,15 @@ final class BindingTranslatedTransactionChain implements BindingTransactionChain
 
     private final DOMTransactionChain delegate;
     private final BindingToNormalizedNodeCodec codec;
-    private final DelegateChainListener delegatingListener;
-    private final TransactionChainListener listener;
+    private final DelegateChainListener domListener;
+    private final TransactionChainListener bindingListener;
 
     public BindingTranslatedTransactionChain(final DOMDataBroker chainFactory,
             final BindingToNormalizedNodeCodec codec, final TransactionChainListener listener) {
         Preconditions.checkNotNull(chainFactory, "DOM Transaction chain factory must not be null");
-        this.delegatingListener = new DelegateChainListener();
-        this.listener = listener;
-        this.delegate = chainFactory.createTransactionChain(listener);
+        this.domListener = new DelegateChainListener();
+        this.bindingListener = listener;
+        this.delegate = chainFactory.createTransactionChain(domListener);
         this.codec = codec;
     }
 
@@ -110,7 +110,7 @@ final class BindingTranslatedTransactionChain implements BindingTransactionChain
          * chain, so we are not changing any of our internal state
          * to mark that we failed.
          */
-        this.delegatingListener.onTransactionChainFailed(this, tx, t);
+        this.bindingListener.onTransactionChainFailed(this, tx, t);
     }
 
     @Override
@@ -141,7 +141,7 @@ final class BindingTranslatedTransactionChain implements BindingTransactionChain
         public void onTransactionChainSuccessful(final TransactionChain<?, ?> chain) {
             Preconditions.checkState(delegate.equals(chain),
                     "Illegal state - listener for %s was invoked for incorrect chain %s.", delegate, chain);
-            listener.onTransactionChainSuccessful(BindingTranslatedTransactionChain.this);
+            bindingListener.onTransactionChainSuccessful(BindingTranslatedTransactionChain.this);
         }
     }
 
index ef56d02a2efc245e8f7cb35f01bac9c1a8eba3d6..cf37cbdd005effaacb2294edd9308b8598e9788f 100644 (file)
@@ -17,7 +17,9 @@ public abstract class AbstractUntypedActor extends UntypedActor {
         Logging.getLogger(getContext().system(), this);
 
     public AbstractUntypedActor() {
-        LOG.debug("Actor created {}", getSelf());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Actor created {}", getSelf());
+        }
         getContext().
             system().
             actorSelection("user/termination-monitor").
@@ -27,11 +29,13 @@ public abstract class AbstractUntypedActor extends UntypedActor {
 
     @Override public void onReceive(Object message) throws Exception {
         final String messageType = message.getClass().getSimpleName();
-        LOG.debug("Received message {}", messageType);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Received message {}", messageType);
+        }
         handleReceive(message);
-
-        LOG.debug("Done handling message {}", messageType);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Done handling message {}", messageType);
+        }
     }
 
     protected abstract void handleReceive(Object message) throws Exception;
@@ -41,7 +45,9 @@ public abstract class AbstractUntypedActor extends UntypedActor {
     }
 
     protected void unknownMessage(Object message) throws Exception {
-        LOG.debug("Received unhandled message {}", message);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Received unhandled message {}", message);
+        }
         unhandled(message);
     }
 }
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java
new file mode 100644 (file)
index 0000000..36b2866
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
+import akka.persistence.UntypedPersistentActor;
+
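+/**
+ * Base class for persistent actors that wraps command and recovery handling
+ * with debug logging guarded by isDebugEnabled checks.
+ */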
+public abstract class AbstractUntypedPersistentActor extends UntypedPersistentActor {
+
+    protected final LoggingAdapter LOG =
+        Logging.getLogger(getContext().system(), this);
+
+    public AbstractUntypedPersistentActor() {
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Actor created {}", getSelf());
+        }
+        getContext().
+            system().
+            actorSelection("user/termination-monitor").
+            tell(new Monitor(getSelf()), getSelf());
+
+    }
+
+
+    @Override public void onReceiveCommand(Object message) throws Exception {
+        final String messageType = message.getClass().getSimpleName();
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Received message {}", messageType);
+        }
+        handleCommand(message);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Done handling message {}", messageType);
+        }
+
+    }
+
+    @Override public void onReceiveRecover(Object message) throws Exception {
+        final String messageType = message.getClass().getSimpleName();
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Received message {}", messageType);
+        }
+        handleRecover(message);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Done handling message {}", messageType);
+        }
+
+    }
+
+    protected abstract void handleRecover(Object message) throws Exception;
+
+    protected abstract void handleCommand(Object message) throws Exception;
+
+    protected void ignoreMessage(Object message) {
+        LOG.debug("Unhandled message {} ", message);
+    }
+
+    protected void unknownMessage(Object message) throws Exception {
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Received unhandled message {}", message);
+        }
+        unhandled(message);
+    }
+}
index 3e1bd35632385d3ec973ba96c882587867170b94..44da4a56683558e9b15b8a1e94d1af8c10d1a3df 100644 (file)
@@ -9,6 +9,7 @@
 package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
 
 import com.google.common.base.Preconditions;
+
 import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
 import org.opendaylight.yangtools.yang.common.SimpleDateFormatUtil;
 import org.opendaylight.yangtools.yang.data.api.Node;
@@ -36,6 +37,7 @@ import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.NormalizedNo
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Date;
+import java.util.EnumMap;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -232,7 +234,7 @@ public class NormalizedNodeSerializer {
 
     private static class DeSerializer implements NormalizedNodeDeSerializationContext {
         private static Map<NormalizedNodeType, DeSerializationFunction>
-            deSerializationFunctions = new HashMap<>();
+            deSerializationFunctions = new EnumMap<>(NormalizedNodeType.class);
 
         static {
             deSerializationFunctions.put(CONTAINER_NODE_TYPE,
@@ -447,8 +449,9 @@ public class NormalizedNodeSerializer {
 
         private NormalizedNode deSerialize(NormalizedNodeMessages.Node node){
             Preconditions.checkNotNull(node, "node should not be null");
-            DeSerializationFunction deSerializationFunction =
-                Preconditions.checkNotNull(deSerializationFunctions.get(NormalizedNodeType.values()[node.getIntType()]), "Unknown type " + node);
+
+            DeSerializationFunction deSerializationFunction = deSerializationFunctions.get(
+                    NormalizedNodeType.values()[node.getIntType()]);
 
             return deSerializationFunction.apply(this, node);
         }
@@ -544,8 +547,4 @@ public class NormalizedNodeSerializer {
             NormalizedNode apply(DeSerializer deserializer, NormalizedNodeMessages.Node node);
         }
     }
-
-
-
-
 }
index d7627c008eb9062a76b3540a679706c08c686b07..4fb676e5189bcb70158cccd1a563f3e54ba36405 100644 (file)
@@ -9,6 +9,7 @@
 package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
 
 import com.google.common.base.Preconditions;
+
 import org.opendaylight.controller.cluster.datastore.node.utils.NodeIdentifierFactory;
 import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
 import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
@@ -26,6 +27,7 @@ import java.util.Set;
 import static org.opendaylight.controller.cluster.datastore.node.utils.serialization.PathArgumentType.getSerializablePathArgumentType;
 
 public class PathArgumentSerializer {
+    private static final String REVISION_ARG = "?revision=";
     private static final Map<Class, PathArgumentAttributesGetter> pathArgumentAttributesGetters = new HashMap<>();
 
     public static NormalizedNodeMessages.PathArgument serialize(NormalizedNodeSerializationContext context, YangInstanceIdentifier.PathArgument pathArgument){
@@ -190,27 +192,24 @@ public class PathArgumentSerializer {
         // If this serializer is used qName cannot be null (see encodeQName)
         // adding null check only in case someone tried to deSerialize a protocol buffer node
         // that was not serialized using the PathArgumentSerializer
-        Preconditions.checkNotNull(qName, "qName should not be null");
-        Preconditions.checkArgument(!"".equals(qName.getLocalName()),
-            "qName.localName cannot be empty qName = " + qName.toString());
-        Preconditions.checkArgument(qName.getNamespace() != -1, "qName.namespace should be valid");
+//        Preconditions.checkNotNull(qName, "qName should not be null");
+//        Preconditions.checkArgument(qName.getNamespace() != -1, "qName.namespace should be valid");
 
-        StringBuilder sb = new StringBuilder();
         String namespace = context.getNamespace(qName.getNamespace());
-        String revision = "";
         String localName = context.getLocalName(qName.getLocalName());
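+        // Pre-size the builder for namespace, localName and the two parens,
+        // plus the revision clause when present, to avoid resizing.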
+        StringBuilder sb;
         if(qName.getRevision() != -1){
-            revision = context.getRevision(qName.getRevision());
-            sb.append("(").append(namespace).append("?revision=").append(
-                revision).append(")").append(
-                localName);
+            String revision = context.getRevision(qName.getRevision());
+            sb = new StringBuilder(namespace.length() + REVISION_ARG.length() + revision.length() +
+                    localName.length() + 2);
+            sb.append('(').append(namespace).append(REVISION_ARG).append(
+                revision).append(')').append(localName);
         } else {
-            sb.append("(").append(namespace).append(")").append(
-                localName);
+            sb = new StringBuilder(namespace.length() + localName.length() + 2);
+            sb.append('(').append(namespace).append(')').append(localName);
         }
 
         return sb.toString();
-
     }
 
     /**
@@ -223,10 +222,6 @@ public class PathArgumentSerializer {
         NormalizedNodeDeSerializationContext context,
         NormalizedNodeMessages.PathArgument pathArgument) {
 
-        Preconditions.checkArgument(pathArgument.getIntType() >= 0
-            && pathArgument.getIntType() < PathArgumentType.values().length,
-            "Illegal PathArgumentType " + pathArgument.getIntType());
-
         switch(PathArgumentType.values()[pathArgument.getIntType()]){
             case NODE_IDENTIFIER_WITH_VALUE : {
 
@@ -272,13 +267,21 @@ public class PathArgumentSerializer {
         NormalizedNodeDeSerializationContext context,
         List<NormalizedNodeMessages.PathArgumentAttribute> attributesList) {
 
-        Map<QName, Object> map = new HashMap<>();
-
-        for(NormalizedNodeMessages.PathArgumentAttribute attribute : attributesList){
+        Map<QName, Object> map;
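+        // A single attribute is the common case; use an immutable singleton
+        // map there to avoid allocating a HashMap.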
+        if(attributesList.size() == 1) {
+            NormalizedNodeMessages.PathArgumentAttribute attribute = attributesList.get(0);
             NormalizedNodeMessages.QName name = attribute.getName();
             Object value = parseAttribute(context, attribute);
+            map = Collections.singletonMap(QNameFactory.create(qNameToString(context, name)), value);
+        } else {
+            map = new HashMap<>();
+
+            for(NormalizedNodeMessages.PathArgumentAttribute attribute : attributesList){
+                NormalizedNodeMessages.QName name = attribute.getName();
+                Object value = parseAttribute(context, attribute);
 
-            map.put(QNameFactory.create(qNameToString(context, name)), value);
+                map.put(QNameFactory.create(qNameToString(context, name)), value);
+            }
         }
 
         return map;
index 04c95d61ceb20854a8f19308df74481998b4c776..8def754f117792ef8e02f327455a9aa54785cf3f 100644 (file)
@@ -8,7 +8,6 @@
 
 package org.opendaylight.controller.cluster.datastore.node.utils.serialization;
 
-import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.datastore.node.utils.QNameFactory;
 import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
 import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
@@ -70,9 +69,6 @@ public class ValueSerializer {
 
 
     private static Object deSerializeBasicTypes(int valueType, String value) {
-        Preconditions.checkArgument(valueType >= 0 && valueType < ValueType.values().length,
-            "Illegal value type " + valueType );
-
         switch(ValueType.values()[valueType]){
            case SHORT_TYPE: {
                return Short.valueOf(value);
index 8724dfe43a2b2ba70c41b3108ac50bb2ca040994..49db8967a685914924eb4e90d5914ecdbc1b43a8 100644 (file)
@@ -50,8 +50,9 @@ public enum ValueType {
     public static final ValueType getSerializableType(Object node){
         Preconditions.checkNotNull(node, "node should not be null");
 
-        if(types.containsKey(node.getClass())) {
-            return types.get(node.getClass());
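+        // Single map lookup instead of containsKey followed by get.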
+        ValueType type = types.get(node.getClass());
+        if(type != null) {
+            return type;
         } else if(node instanceof Set){
             return BITS_TYPE;
         }
index c9d5e89ae1079c51249acaaeaf61fe27e2b7577c..0f93f43c566639641c32a3a1472d0c84ae8e513f 100644 (file)
@@ -100,7 +100,9 @@ public class XmlStreamUtils {
     for (Entry<URI, String> e: prefixes.getPrefixes()) {
       writer.writeNamespace(e.getValue(), e.getKey().toString());
     }
-    LOG.debug("Instance identifier with Random prefix is now {}", str);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Instance identifier with Random prefix is now {}", str);
+    }
     writer.writeCharacters(str);
   }
 
@@ -169,7 +171,7 @@ public class XmlStreamUtils {
         DataSchemaNode childSchema = null;
         if (schema instanceof DataNodeContainer) {
           childSchema = SchemaUtils.findFirstSchema(child.getNodeType(), ((DataNodeContainer) schema).getChildNodes()).orNull();
-          if (childSchema == null) {
+          if (childSchema == null && LOG.isDebugEnabled()) {
             LOG.debug("Probably the data node \"{}\" does not conform to schema", child == null ? "" : child.getNodeType().getLocalName());
           }
         }
@@ -192,7 +194,9 @@ public class XmlStreamUtils {
    */
   public void writeValue(final @Nonnull XMLStreamWriter writer, final @Nonnull TypeDefinition<?> type, final Object value) throws XMLStreamException {
     if (value == null) {
-      LOG.debug("Value of {}:{} is null, not encoding it", type.getQName().getNamespace(), type.getQName().getLocalName());
+      if(LOG.isDebugEnabled()){
+        LOG.debug("Value of {}:{} is null, not encoding it", type.getQName().getNamespace(), type.getQName().getLocalName());
+      }
       return;
     }
 
@@ -232,18 +236,24 @@ public class XmlStreamUtils {
       writer.writeNamespace(prefix, qname.getNamespace().toString());
       writer.writeCharacters(prefix + ':' + qname.getLocalName());
     } else {
-      LOG.debug("Value of {}:{} is not a QName but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Value of {}:{} is not a QName but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
+      }
       writer.writeCharacters(String.valueOf(value));
     }
   }
 
   private static void write(final @Nonnull XMLStreamWriter writer, final @Nonnull InstanceIdentifierTypeDefinition type, final @Nonnull Object value) throws XMLStreamException {
     if (value instanceof YangInstanceIdentifier) {
-      LOG.debug("Writing InstanceIdentifier object {}", value);
+      if(LOG.isDebugEnabled()) {
+          LOG.debug("Writing InstanceIdentifier object {}", value);
+      }
       write(writer, (YangInstanceIdentifier)value);
     } else {
-      LOG.debug("Value of {}:{} is not an InstanceIdentifier but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
-      writer.writeCharacters(String.valueOf(value));
+      if(LOG.isDebugEnabled()) {
+          LOG.debug("Value of {}:{} is not an InstanceIdentifier but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
+      }
+      writer.writeCharacters(String.valueOf(value));
     }
   }
 }
index ea8f4a3ef19810a6d95ebc4211b0f4569b6e2716..d0cc2adb5f06e1a61859e57b6b086857e36d3f22 100644 (file)
@@ -74,7 +74,9 @@ public class XmlUtils {
    * @return xml String
    */
   public static String inputCompositeNodeToXml(CompositeNode cNode, SchemaContext schemaContext){
-    LOG.debug("Converting input composite node to xml {}", cNode);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Converting input composite node to xml {}", cNode);
+    }
     if (cNode == null) {
         return BLANK;
     }
@@ -88,12 +90,14 @@ public class XmlUtils {
       Set<RpcDefinition> rpcs =  schemaContext.getOperations();
       for(RpcDefinition rpc : rpcs) {
         if(rpc.getQName().equals(cNode.getNodeType())){
-          LOG.debug("Found the rpc definition from schema context matching with input composite node  {}", rpc.getQName());
-
+          if(LOG.isDebugEnabled()) {
+              LOG.debug("Found the rpc definition from schema context matching with input composite node  {}", rpc.getQName());
+          }
           CompositeNode inputContainer = cNode.getFirstCompositeByName(QName.create(cNode.getNodeType(), "input"));
           domTree = XmlDocumentUtils.toDocument(inputContainer, rpc.getInput(), XmlDocumentUtils.defaultValueCodecProvider());
-
-          LOG.debug("input composite node to document conversion complete, document is   {}", domTree);
+          if(LOG.isDebugEnabled()) {
+              LOG.debug("input composite node to document conversion complete, document is   {}", domTree);
+          }
           break;
         }
       }
@@ -111,7 +115,9 @@ public class XmlUtils {
    * @return xml string
    */
   public static String outputCompositeNodeToXml(CompositeNode cNode, SchemaContext schemaContext){
-    LOG.debug("Converting output composite node to xml {}", cNode);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Converting output composite node to xml {}", cNode);
+    }
     if (cNode == null) {
         return BLANK;
     }
@@ -125,12 +131,14 @@ public class XmlUtils {
       Set<RpcDefinition> rpcs =  schemaContext.getOperations();
       for(RpcDefinition rpc : rpcs) {
         if(rpc.getQName().equals(cNode.getNodeType())){
-          LOG.debug("Found the rpc definition from schema context matching with output composite node  {}", rpc.getQName());
-
+          if(LOG.isDebugEnabled()) {
+              LOG.debug("Found the rpc definition from schema context matching with output composite node  {}", rpc.getQName());
+          }
           CompositeNode outputContainer = cNode.getFirstCompositeByName(QName.create(cNode.getNodeType(), "output"));
           domTree = XmlDocumentUtils.toDocument(outputContainer, rpc.getOutput(), XmlDocumentUtils.defaultValueCodecProvider());
-
-          LOG.debug("output composite node to document conversion complete, document is   {}", domTree);
+          if(LOG.isDebugEnabled()) {
+              LOG.debug("output composite node to document conversion complete, document is   {}", domTree);
+          }
           break;
         }
       }
@@ -152,8 +160,9 @@ public class XmlUtils {
 
       LOG.error("Error during translation of Document to OutputStream", e);
     }
-    LOG.debug("Document to string conversion complete, xml string is  {} ",  writer.toString());
-
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Document to string conversion complete, xml string is  {} ", writer.toString());
+    }
     return writer.toString();
   }
 
@@ -188,7 +197,9 @@ public class XmlUtils {
    * @return CompositeNode object based on the input, if any of the input parameter is null, a null object is returned
    */
   public static CompositeNode inputXmlToCompositeNode(QName rpc, String xml,  SchemaContext schemaContext){
-    LOG.debug("Converting input xml to composite node {}", xml);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Converting input xml to composite node {}", xml);
+    }
     if (xml==null || xml.length()==0) {
         return null;
     }
@@ -208,8 +219,9 @@ public class XmlUtils {
       Set<RpcDefinition> rpcs =  schemaContext.getOperations();
       for(RpcDefinition rpcDef : rpcs) {
         if(rpcDef.getQName().equals(rpc)){
-          LOG.debug("found the rpc definition from schema context matching rpc  {}", rpc);
-
+          if(LOG.isDebugEnabled()) {
+              LOG.debug("found the rpc definition from schema context matching rpc  {}", rpc);
+          }
           if(rpcDef.getInput() == null) {
             LOG.warn("found rpc definition's input is null");
             return null;
@@ -225,9 +237,9 @@ public class XmlUtils {
 
           List<Node<?>> dataNodes = XmlDocumentUtils.toDomNodes(xmlData,
               Optional.of(rpcDef.getInput().getChildNodes()), schemaContext);
-
-          LOG.debug("Converted xml input to list of nodes  {}", dataNodes);
-
+          if(LOG.isDebugEnabled()) {
+              LOG.debug("Converted xml input to list of nodes  {}", dataNodes);
+          }
           final CompositeNodeBuilder<ImmutableCompositeNode> it = ImmutableCompositeNode.builder();
           it.setQName(rpc);
           it.add(ImmutableCompositeNode.create(input, dataNodes));
@@ -240,8 +252,9 @@ public class XmlUtils {
     } catch (IOException e) {
       LOG.error("Error during building data tree from XML", e);
     }
-
-    LOG.debug("Xml to composite node conversion complete {} ", compositeNode);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Xml to composite node conversion complete {} ", compositeNode);
+    }
     return compositeNode;
   }
 
index 58677103c2df020fd7ac3fdaaa38353a59a328a4..3de49ae296f391bfa9b33afb3524a3ed4e0cc3ae 100644 (file)
@@ -44,25 +44,47 @@ public class ThreadExecutorStatsMXBeanImpl extends AbstractMXBean
         this.executor = Preconditions.checkNotNull(executor);
     }
 
+    private static ThreadExecutorStatsMXBeanImpl createInternal(final Executor executor,
+            final String mBeanName, final String mBeanType, final String mBeanCategory) {
+        if (executor instanceof ThreadPoolExecutor) {
+            final ThreadExecutorStatsMXBeanImpl ret = new ThreadExecutorStatsMXBeanImpl(
+                    (ThreadPoolExecutor) executor, mBeanName, mBeanType, mBeanCategory);
+            return ret;
+        }
+
+        LOG.info("Executor {} is not supported", executor);
+        return null;
+    }
+
     /**
-     * Create a new bean for the statistics, which is already registered.
+     * Creates a new bean if the backing executor is a ThreadPoolExecutor and registers it.
      *
-     * @param executor
-     * @param mBeanName
-     * @param mBeanType
-     * @param mBeanCategory
-     * @return
+     * @param executor the backing {@link Executor}
+     * @param mBeanName Used as the <code>name</code> property in the bean's ObjectName.
+     * @param mBeanType Used as the <code>type</code> property in the bean's ObjectName.
+     * @param mBeanCategory Used as the <code>Category</code> property in the bean's ObjectName.
+     * @return a registered ThreadExecutorStatsMXBeanImpl instance if the backing executor
+     *         is a ThreadPoolExecutor, otherwise null.
      */
     public static ThreadExecutorStatsMXBeanImpl create(final Executor executor, final String mBeanName,
             final String mBeanType, @Nullable final String mBeanCategory) {
-        if (executor instanceof ThreadPoolExecutor) {
-            final ThreadExecutorStatsMXBeanImpl ret = new ThreadExecutorStatsMXBeanImpl((ThreadPoolExecutor) executor, mBeanName, mBeanType, mBeanCategory);
+        ThreadExecutorStatsMXBeanImpl ret = createInternal(executor, mBeanName, mBeanType, mBeanCategory);
+        if(ret != null) {
             ret.registerMBean();
-            return ret;
         }
 
-        LOG.info("Executor {} is not supported", executor);
-        return null;
+        return ret;
+    }
+
+    /**
+     * Creates a new bean if the backing executor is a ThreadPoolExecutor.
+     *
+     * @param executor the backing {@link Executor}
+     * @return a ThreadExecutorStatsMXBeanImpl instance if the backing executor
+     *         is a ThreadPoolExecutor, otherwise null.
+     */
+    public static ThreadExecutorStatsMXBeanImpl create(final Executor executor) {
+        return createInternal(executor, "", "", null);
     }
 
     @Override
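
The refactoring above separates bean creation (createInternal) from JMX registration. A hedged usage sketch, assuming the java.util.concurrent imports already present in the file; only executors backed by a ThreadPoolExecutor yield a non-null bean, and the "my-pool" name plus the inline executor are illustrative only:

    ExecutorService pool = Executors.newFixedThreadPool(4); // a ThreadPoolExecutor underneath

    // Created and registered with the MBean server:
    ThreadExecutorStatsMXBeanImpl registered =
            ThreadExecutorStatsMXBeanImpl.create(pool, "my-pool", "DistributedDatastore", null);

    // Created but left unregistered (the new overload):
    ThreadExecutorStatsMXBeanImpl unregistered = ThreadExecutorStatsMXBeanImpl.create(pool);

    // Any other Executor type is unsupported and both factories return null:
    Executor inline = new Executor() {
        @Override public void execute(Runnable command) { command.run(); }
    };
    assert ThreadExecutorStatsMXBeanImpl.create(inline) == null;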
index 1021ddeee7a5348f26cbaa8845059a515c537f92..83164b07d9431a70ce609139a476190a5488ab9a 100644 (file)
@@ -8,11 +8,12 @@
 
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.base.Preconditions;
-
+import org.opendaylight.controller.cluster.raft.ConfigParams;
+import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
 import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreConfigProperties;
 
 import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
 
 import java.util.concurrent.TimeUnit;
 
@@ -27,22 +28,30 @@ public class DatastoreContext {
     private final Duration shardTransactionIdleTimeout;
     private final int operationTimeoutInSeconds;
     private final String dataStoreMXBeanType;
+    private final ConfigParams shardRaftConfig;
 
     public DatastoreContext() {
-        this.dataStoreProperties = null;
-        this.dataStoreMXBeanType = "DistributedDatastore";
-        this.shardTransactionIdleTimeout = Duration.create(10, TimeUnit.MINUTES);
-        this.operationTimeoutInSeconds = 5;
+        this("DistributedDatastore", null, Duration.create(10, TimeUnit.MINUTES), 5, 1000, 20000, 500);
     }
 
     public DatastoreContext(String dataStoreMXBeanType,
             InMemoryDOMDataStoreConfigProperties dataStoreProperties,
             Duration shardTransactionIdleTimeout,
-            int operationTimeoutInSeconds) {
+            int operationTimeoutInSeconds,
+            int shardJournalRecoveryLogBatchSize,
+            int shardSnapshotBatchCount,
+            int shardHeartbeatIntervalInMillis) {
         this.dataStoreMXBeanType = dataStoreMXBeanType;
-        this.dataStoreProperties = Preconditions.checkNotNull(dataStoreProperties);
+        this.dataStoreProperties = dataStoreProperties;
         this.shardTransactionIdleTimeout = shardTransactionIdleTimeout;
         this.operationTimeoutInSeconds = operationTimeoutInSeconds;
+
+        DefaultConfigParamsImpl raftConfig = new DefaultConfigParamsImpl();
+        raftConfig.setHeartBeatInterval(new FiniteDuration(shardHeartbeatIntervalInMillis,
+                TimeUnit.MILLISECONDS));
+        raftConfig.setJournalRecoveryLogBatchSize(shardJournalRecoveryLogBatchSize);
+        raftConfig.setSnapshotBatchCount(shardSnapshotBatchCount);
+        shardRaftConfig = raftConfig;
     }
 
     public InMemoryDOMDataStoreConfigProperties getDataStoreProperties() {
@@ -60,4 +69,8 @@ public class DatastoreContext {
     public int getOperationTimeoutInSeconds() {
         return operationTimeoutInSeconds;
     }
+
+    public ConfigParams getShardRaftConfig() {
+        return shardRaftConfig;
+    }
 }
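
With the widened constructor, a DatastoreContext now carries the shard's Raft tuning alongside the existing settings. A construction sketch using the same values the no-arg constructor delegates to (types and defaults taken from the hunk above):

    DatastoreContext context = new DatastoreContext(
            "DistributedDatastore",                 // dataStoreMXBeanType
            null,                                   // dataStoreProperties (no longer required)
            Duration.create(10, TimeUnit.MINUTES),  // shardTransactionIdleTimeout
            5,                                      // operationTimeoutInSeconds
            1000,                                   // shardJournalRecoveryLogBatchSize
            20000,                                  // shardSnapshotBatchCount
            500);                                   // shardHeartbeatIntervalInMillis

    ConfigParams raftConfig = context.getShardRaftConfig(); // handed to each Shard's RaftActor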
index bf541d95deadeb3e9ecce59c83cdb988934289a5..c780881a2ffad1ed50695b7a38111068ec2f8e3f 100644 (file)
@@ -76,9 +76,9 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener, Au
 
         Preconditions.checkNotNull(path, "path should not be null");
         Preconditions.checkNotNull(listener, "listener should not be null");
-
-        LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
+        }
         ActorRef dataChangeListenerActor = actorContext.getActorSystem().actorOf(
             DataChangeListener.props(listener ));
 
@@ -108,11 +108,11 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener, Au
             }, actorContext.getActorSystem().dispatcher());
             return listenerRegistrationProxy;
         }
-
-        LOG.debug(
-            "No local shard for shardName {} was found so returning a noop registration",
-            shardName);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug(
+                "No local shard for shardName {} was found so returning a noop registration",
+                shardName);
+        }
         return new NoOpDataChangeListenerRegistration(listener);
     }
 
index bf1eb056b577ce265c1a8e6d794a3572bf469ab5..ddb5989f096145cddd3b716afa8dfc1dce3311e3 100644 (file)
@@ -20,6 +20,7 @@ import akka.serialization.Serialization;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
@@ -35,7 +36,6 @@ import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChainReply;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
 import org.opendaylight.controller.cluster.datastore.messages.ForwardedCommitTransaction;
@@ -48,12 +48,11 @@ import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContex
 import org.opendaylight.controller.cluster.datastore.modification.Modification;
 import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
 import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
-import org.opendaylight.controller.cluster.raft.ConfigParams;
-import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
 import org.opendaylight.controller.cluster.raft.RaftActor;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
 import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
@@ -68,14 +67,12 @@ import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import scala.concurrent.duration.FiniteDuration;
-
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
 
 /**
  * A Shard represents a portion of the logical data tree <br/>
@@ -85,8 +82,6 @@ import java.util.concurrent.TimeUnit;
  */
 public class Shard extends RaftActor {
 
-    private static final ConfigParams configParams = new ShardConfigParams();
-
     public static final String DEFAULT_NAME = "default";
 
     // The state of this Shard
@@ -115,11 +110,18 @@ public class Shard extends RaftActor {
 
     private ActorRef createSnapshotTransaction;
 
+    /**
+     * Coordinates persistence recovery on startup.
+     */
+    private ShardRecoveryCoordinator recoveryCoordinator;
+    private List<Object> currentLogRecoveryBatch;
+
     private final Map<String, DOMStoreTransactionChain> transactionChains = new HashMap<>();
 
-    private Shard(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses,
+    protected Shard(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses,
             DatastoreContext datastoreContext, SchemaContext schemaContext) {
-        super(name.toString(), mapPeerAddresses(peerAddresses), Optional.of(configParams));
+        super(name.toString(), mapPeerAddresses(peerAddresses),
+                Optional.of(datastoreContext.getShardRaftConfig()));
 
         this.name = name;
         this.datastoreContext = datastoreContext;
@@ -172,8 +174,11 @@ public class Shard extends RaftActor {
     }
 
     @Override public void onReceiveRecover(Object message) {
-        LOG.debug("onReceiveRecover: Received message {} from {}", message.getClass().toString(),
-            getSender());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("onReceiveRecover: Received message {} from {}",
+                message.getClass().toString(),
+                getSender());
+        }
 
         if (message instanceof RecoveryFailure){
             LOG.error(((RecoveryFailure) message).cause(), "Recovery failed because of this cause");
@@ -183,8 +188,11 @@ public class Shard extends RaftActor {
     }
 
     @Override public void onReceiveCommand(Object message) {
-        LOG.debug("onReceiveCommand: Received message {} from {}", message.getClass().toString(),
-            getSender());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("onReceiveCommand: Received message {} from {}",
+                message.getClass().toString(),
+                getSender());
+        }
 
         if(message.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
             // This must be for install snapshot. Don't want to open this up and trigger
@@ -299,7 +307,9 @@ public class Shard extends RaftActor {
             ShardTransactionIdentifier.builder()
                 .remoteTransactionId(remoteTransactionId)
                 .build();
-        LOG.debug("Creating transaction : {} ", transactionId);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Creating transaction : {} ", transactionId);
+        }
         ActorRef transactionActor =
             createTypedTransactionActor(transactionType, transactionId, transactionChainId);
 
@@ -326,40 +336,22 @@ public class Shard extends RaftActor {
         DOMStoreThreePhaseCommitCohort cohort =
             modificationToCohort.remove(serialized);
         if (cohort == null) {
-            LOG.debug(
-                "Could not find cohort for modification : {}. Writing modification using a new transaction",
-                modification);
-            DOMStoreWriteTransaction transaction =
-                store.newWriteOnlyTransaction();
-
-            LOG.debug("Created new transaction {}", transaction.getIdentifier().toString());
-
-            modification.apply(transaction);
-            try {
-                syncCommitTransaction(transaction);
-            } catch (InterruptedException | ExecutionException e) {
-                shardMBean.incrementFailedTransactionsCount();
-                LOG.error("Failed to commit", e);
-                return;
-            }
-            //we want to just apply the recovery commit and return
-            shardMBean.incrementCommittedTransactionCount();
+            // If there's no cached cohort then we must be applying replicated state.
+            commitWithNewTransaction(serialized);
             return;
         }
 
-
-        if(sender == null){
+        if(sender == null) {
             LOG.error("Commit failed. Sender cannot be null");
             return;
         }
 
-        final ListenableFuture<Void> future = cohort.commit();
-        final ActorRef self = getSelf();
+        ListenableFuture<Void> future = cohort.commit();
 
         Futures.addCallback(future, new FutureCallback<Void>() {
             @Override
             public void onSuccess(Void v) {
-                sender.tell(new CommitTransactionReply().toSerializable(), self);
+                sender.tell(new CommitTransactionReply().toSerializable(), getSelf());
                 shardMBean.incrementCommittedTransactionCount();
                 shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
             }
@@ -368,12 +360,24 @@ public class Shard extends RaftActor {
             public void onFailure(Throwable t) {
                 LOG.error(t, "An exception happened during commit");
                 shardMBean.incrementFailedTransactionsCount();
-                sender.tell(new akka.actor.Status.Failure(t), self);
+                sender.tell(new akka.actor.Status.Failure(t), getSelf());
             }
         });
 
     }
 
+    private void commitWithNewTransaction(Object modification) {
+        DOMStoreWriteTransaction tx = store.newWriteOnlyTransaction();
+        MutableCompositeModification.fromSerializable(modification, schemaContext).apply(tx);
+        try {
+            syncCommitTransaction(tx);
+            shardMBean.incrementCommittedTransactionCount();
+        } catch (InterruptedException | ExecutionException e) {
+            shardMBean.incrementFailedTransactionsCount();
+            LOG.error(e, "Failed to commit");
+        }
+    }
+
     private void handleForwardedCommit(ForwardedCommitTransaction message) {
         Object serializedModification =
             message.getModification().toSerializable();
@@ -402,8 +406,10 @@ public class Shard extends RaftActor {
     private void registerChangeListener(
         RegisterChangeListener registerChangeListener) {
 
-        LOG.debug("registerDataChangeListener for {}", registerChangeListener
-            .getPath());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("registerDataChangeListener for {}", registerChangeListener
+                .getPath());
+        }
 
 
         ActorSelection dataChangeListenerPath = getContext()
@@ -431,48 +437,118 @@ public class Shard extends RaftActor {
             getContext().actorOf(
                 DataChangeListenerRegistration.props(registration));
 
-        LOG.debug(
-            "registerDataChangeListener sending reply, listenerRegistrationPath = {} "
-            , listenerRegistration.path().toString());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug(
+                "registerDataChangeListener sending reply, listenerRegistrationPath = {} "
+                , listenerRegistration.path().toString());
+        }
 
         getSender()
             .tell(new RegisterChangeListenerReply(listenerRegistration.path()),
                 getSelf());
     }
 
-    private void createTransactionChain() {
-        DOMStoreTransactionChain chain = store.createTransactionChain();
-        ActorRef transactionChain = getContext().actorOf(
-                ShardTransactionChain.props(chain, schemaContext, datastoreContext, shardMBean));
-        getSender().tell(new CreateTransactionChainReply(transactionChain.path()).toSerializable(),
-                getSelf());
-    }
-
     private boolean isMetricsCaptureEnabled(){
         CommonConfig config = new CommonConfig(getContext().system().settings().config());
         return config.isMetricCaptureEnabled();
     }
 
-    @Override protected void applyState(ActorRef clientActor, String identifier,
-        Object data) {
+    @Override
+    protected void startLogRecoveryBatch(int maxBatchSize) {
+        currentLogRecoveryBatch = Lists.newArrayListWithCapacity(maxBatchSize);
+
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("{} : starting log recovery batch with max size {}", persistenceId(), maxBatchSize);
+        }
+    }
+
+    @Override
+    protected void appendRecoveredLogEntry(Payload data) {
+        if (data instanceof CompositeModificationPayload) {
+            currentLogRecoveryBatch.add(((CompositeModificationPayload) data).getModification());
+        } else {
+            LOG.error("Unknown state received {} during recovery", data);
+        }
+    }
+
+    @Override
+    protected void applyRecoverySnapshot(ByteString snapshot) {
+        if(recoveryCoordinator == null) {
+            recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
+        }
+
+        recoveryCoordinator.submit(snapshot, store.newWriteOnlyTransaction());
+
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("{} : submitted recovery sbapshot", persistenceId());
+        }
+    }
+
+    @Override
+    protected void applyCurrentLogRecoveryBatch() {
+        if(recoveryCoordinator == null) {
+            recoveryCoordinator = new ShardRecoveryCoordinator(persistenceId(), schemaContext);
+        }
+
+        recoveryCoordinator.submit(currentLogRecoveryBatch, store.newWriteOnlyTransaction());
+
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("{} : submitted log recovery batch with size {}", persistenceId(),
+                    currentLogRecoveryBatch.size());
+        }
+    }
+
+    @Override
+    protected void onRecoveryComplete() {
+        if(recoveryCoordinator != null) {
+            Collection<DOMStoreWriteTransaction> txList = recoveryCoordinator.getTransactions();
+
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("{} : recovery complete - committing {} Tx's", persistenceId(), txList.size());
+            }
+
+            for(DOMStoreWriteTransaction tx: txList) {
+                try {
+                    syncCommitTransaction(tx);
+                    shardMBean.incrementCommittedTransactionCount();
+                } catch (InterruptedException | ExecutionException e) {
+                    shardMBean.incrementFailedTransactionsCount();
+                    LOG.error(e, "Failed to commit");
+                }
+            }
+        }
+
+        recoveryCoordinator = null;
+        currentLogRecoveryBatch = null;
+        updateJournalStats();
+    }
+
+    @Override
+    protected void applyState(ActorRef clientActor, String identifier, Object data) {
 
         if (data instanceof CompositeModificationPayload) {
-            Object modification =
-                ((CompositeModificationPayload) data).getModification();
+            Object modification = ((CompositeModificationPayload) data).getModification();
 
             if (modification != null) {
                 commit(clientActor, modification);
             } else {
                 LOG.error(
                     "modification is null - this is very unexpected, clientActor = {}, identifier = {}",
-                    identifier, clientActor.path().toString());
+                    identifier, clientActor != null ? clientActor.path().toString() : null);
             }
 
         } else {
-            LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}", data, data.getClass().getClassLoader(), CompositeModificationPayload.class.getClassLoader());
+            LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
+                    data, data.getClass().getClassLoader(),
+                    CompositeModificationPayload.class.getClassLoader());
         }
 
-        // Update stats
+        updateJournalStats();
+
+    }
+
+    private void updateJournalStats() {
         ReplicatedLogEntry lastLogEntry = getLastLogEntry();
 
         if (lastLogEntry != null) {
@@ -482,10 +558,10 @@ public class Shard extends RaftActor {
 
         shardMBean.setCommitIndex(getCommitIndex());
         shardMBean.setLastApplied(getLastApplied());
-
     }
 
-    @Override protected void createSnapshot() {
+    @Override
+    protected void createSnapshot() {
         if (createSnapshotTransaction == null) {
 
             // Create a transaction. We are really going to treat the transaction as a worker
@@ -500,7 +576,9 @@ public class Shard extends RaftActor {
         }
     }
 
-    @VisibleForTesting @Override protected void applySnapshot(ByteString snapshot) {
+    @VisibleForTesting
+    @Override
+    protected void applySnapshot(ByteString snapshot) {
         // Since this will be done only on Recovery or when this actor is a Follower
        // we can safely commit everything in here. We need not worry about event notifications
         // as they would have already been disabled on the follower
@@ -531,14 +609,17 @@ public class Shard extends RaftActor {
                 .tell(new EnableNotification(isLeader()), getSelf());
         }
 
-
         shardMBean.setRaftState(getRaftState().name());
         shardMBean.setCurrentTerm(getCurrentTerm());
 
         // If this actor is no longer the leader close all the transaction chains
         if(!isLeader()){
             for(Map.Entry<String, DOMStoreTransactionChain> entry : transactionChains.entrySet()){
-                LOG.debug("onStateChanged: Closing transaction chain {} because shard {} is no longer the leader", entry.getKey(), getId());
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug(
+                        "onStateChanged: Closing transaction chain {} because shard {} is no longer the leader",
+                        entry.getKey(), getId());
+                }
                 entry.getValue().close();
             }
 
@@ -547,10 +628,6 @@ public class Shard extends RaftActor {
     }
 
     @Override protected void onLeaderChanged(String oldLeader, String newLeader) {
-        if((oldLeader == null && newLeader == null) || (newLeader != null && newLeader.equals(oldLeader)) ){
-            return;
-        }
-        LOG.info("Current state = {}, Leader = {}", getRaftState().name(), newLeader);
         shardMBean.setLeader(newLeader);
     }
 
@@ -558,16 +635,6 @@ public class Shard extends RaftActor {
         return this.name.toString();
     }
 
-
-    private static class ShardConfigParams extends DefaultConfigParamsImpl {
-        public static final FiniteDuration HEART_BEAT_INTERVAL =
-            new FiniteDuration(500, TimeUnit.MILLISECONDS);
-
-        @Override public FiniteDuration getHeartBeatInterval() {
-            return HEART_BEAT_INTERVAL;
-        }
-    }
-
     private static class ShardCreator implements Creator<Shard> {
 
         private static final long serialVersionUID = 1L;
@@ -591,20 +658,24 @@ public class Shard extends RaftActor {
         }
     }
 
-    @VisibleForTesting NormalizedNode readStore() throws ExecutionException, InterruptedException {
+    @VisibleForTesting
+    NormalizedNode<?,?> readStore(YangInstanceIdentifier id)
+            throws ExecutionException, InterruptedException {
         DOMStoreReadTransaction transaction = store.newReadOnlyTransaction();
 
         CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future =
-            transaction.read(YangInstanceIdentifier.builder().build());
+            transaction.read(id);
 
-        NormalizedNode<?, ?> node = future.get().get();
+        Optional<NormalizedNode<?, ?>> optional = future.get();
+        NormalizedNode<?, ?> node = optional.isPresent() ? optional.get() : null;
 
         transaction.close();
 
         return node;
     }
 
-    @VisibleForTesting void writeToStore(YangInstanceIdentifier id, NormalizedNode node)
+    @VisibleForTesting
+    void writeToStore(YangInstanceIdentifier id, NormalizedNode<?,?> node)
         throws ExecutionException, InterruptedException {
         DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
 
@@ -613,4 +684,8 @@ public class Shard extends RaftActor {
         syncCommitTransaction(transaction);
     }
 
+    @VisibleForTesting
+    ShardStats getShardMBean() {
+        return shardMBean;
+    }
 }
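
Taken together, the new overrides give Shard a staged recovery pipeline. A sketch of the order in which the RaftActor base class presumably drives them (ordering inferred from this patch; the base-class side is not shown here):

    // Journal replay, repeated per batch:
    startLogRecoveryBatch(maxBatchSize);   // allocate currentLogRecoveryBatch
    appendRecoveredLogEntry(payload);      // collect each CompositeModificationPayload's modification
    applyCurrentLogRecoveryBatch();        // hand the batch to the ShardRecoveryCoordinator

    // Snapshot replay, when a snapshot exists:
    applyRecoverySnapshot(snapshotBytes);  // also routed through the coordinator

    // Finally:
    onRecoveryComplete();                  // commit every recovered transaction, in order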
index 13ecaa5619614e133b8afe5124acc25a9bacfc84..a8a182380911db26f8c9530e363441d2d2b491cc 100644 (file)
@@ -15,11 +15,16 @@ import akka.actor.OneForOneStrategy;
 import akka.actor.Props;
 import akka.actor.SupervisorStrategy;
 import akka.cluster.ClusterEvent;
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
 import akka.japi.Creator;
 import akka.japi.Function;
+import akka.japi.Procedure;
+import akka.persistence.RecoveryCompleted;
+import akka.persistence.RecoveryFailure;
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
-
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfo;
@@ -32,14 +37,18 @@ import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolve
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
-import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import scala.concurrent.duration.Duration;
 
+import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 /**
  * The ShardManager has the following jobs,
@@ -50,7 +59,10 @@ import java.util.Map;
  * <li> Monitor the cluster members and store their addresses
  * </ul>
  */
-public class ShardManager extends AbstractUntypedActorWithMetering {
+public class ShardManager extends AbstractUntypedPersistentActor {
+
+    protected final LoggingAdapter LOG =
+        Logging.getLogger(getContext().system(), this);
 
     // Stores a mapping between a member name and the address of the member
     // Member names look like "member-1", "member-2" etc and are as specified
@@ -74,6 +86,8 @@ public class ShardManager extends AbstractUntypedActorWithMetering {
 
     private final DatastoreContext datastoreContext;
 
+    private final Collection<String> knownModules = new HashSet<>(128);
+
     /**
      * @param type defines the kind of data that goes into shards created by this shard manager. Examples of type would be
      *             configuration or operational
@@ -105,7 +119,7 @@ public class ShardManager extends AbstractUntypedActorWithMetering {
     }
 
     @Override
-    public void handleReceive(Object message) throws Exception {
+    public void handleCommand(Object message) throws Exception {
         if (message.getClass().equals(FindPrimary.SERIALIZABLE_CLASS)) {
             findPrimary(
                 FindPrimary.fromSerializable(message));
@@ -125,6 +139,23 @@ public class ShardManager extends AbstractUntypedActorWithMetering {
 
     }
 
+    @Override protected void handleRecover(Object message) throws Exception {
+
+        if(message instanceof SchemaContextModules){
+            SchemaContextModules msg = (SchemaContextModules) message;
+            knownModules.clear();
+            knownModules.addAll(msg.getModules());
+        } else if(message instanceof RecoveryFailure){
+            RecoveryFailure failure = (RecoveryFailure) message;
+            LOG.error(failure.cause(), "Recovery failed");
+        } else if(message instanceof RecoveryCompleted){
+            LOG.info("Recovery complete : {}", persistenceId());
+
+            // Delete all the messages from the akka journal except the last one
+            deleteMessages(lastSequenceNr() - 1);
+        }
+    }
+
     private void findLocalShard(FindLocalShard message) {
         ShardInformation shardInformation =
             localShards.get(message.getShardName());
@@ -159,16 +190,42 @@ public class ShardManager extends AbstractUntypedActorWithMetering {
      *
      * @param message
      */
-    private void updateSchemaContext(Object message) {
-        SchemaContext schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
+    private void updateSchemaContext(final Object message) {
+        final SchemaContext schemaContext = ((UpdateSchemaContext) message).getSchemaContext();
+
+        Set<ModuleIdentifier> allModuleIdentifiers = schemaContext.getAllModuleIdentifiers();
+        Set<String> newModules = new HashSet<>(128);
 
-        if(localShards.size() == 0){
-            createLocalShards(schemaContext);
+        for(ModuleIdentifier moduleIdentifier : allModuleIdentifiers){
+            String s = moduleIdentifier.getNamespace().toString();
+            newModules.add(s);
+        }
+
+        if(newModules.containsAll(knownModules)) {
+
+            LOG.info("New SchemaContext has a super set of current knownModules - persisting info");
+
+            knownModules.clear();
+            knownModules.addAll(newModules);
+
+            persist(new SchemaContextModules(newModules), new Procedure<SchemaContextModules>() {
+
+                @Override public void apply(SchemaContextModules param) throws Exception {
+                    LOG.info("Sending new SchemaContext to Shards");
+                    if (localShards.size() == 0) {
+                        createLocalShards(schemaContext);
+                    } else {
+                        for (ShardInformation info : localShards.values()) {
+                            info.getActor().tell(message, getSelf());
+                        }
+                    }
+                }
+
+            });
         } else {
-            for (ShardInformation info : localShards.values()) {
-                info.getActor().tell(message, getSelf());
-            }
+            LOG.info("Rejecting schema context update because it is not a super set of previously known modules");
         }
+
     }
 
     private void findPrimary(FindPrimary message) {
@@ -249,8 +306,8 @@ public class ShardManager extends AbstractUntypedActorWithMetering {
             ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
             Map<ShardIdentifier, String> peerAddresses = getPeerAddresses(shardName);
             ActorRef actor = getContext()
-                .actorOf(Shard.props(shardId, peerAddresses, datastoreContext, schemaContext).
-                    withMailbox(ActorContext.MAILBOX), shardId.toString());
+                .actorOf(Shard.props(shardId, peerAddresses, datastoreContext, schemaContext),
+                    shardId.toString());
             localShardActorNames.add(shardId.toString());
             localShards.put(shardName, new ShardInformation(shardName, actor, peerAddresses));
         }
@@ -306,6 +363,14 @@ public class ShardManager extends AbstractUntypedActorWithMetering {
 
     }
 
+    @Override public String persistenceId() {
+        return "shard-manager-" + type;
+    }
+
+    @VisibleForTesting public Collection<String> getKnownModules() {
+        return knownModules;
+    }
+
     private class ShardInformation {
         private final String shardName;
         private final ActorRef actor;
@@ -337,11 +402,11 @@ public class ShardManager extends AbstractUntypedActorWithMetering {
                 peerAddress);
             if(peerAddresses.containsKey(peerId)){
                 peerAddresses.put(peerId, peerAddress);
-
-                LOG.debug(
-                    "Sending PeerAddressResolved for peer {} with address {} to {}",
-                    peerId, peerAddress, actor.path());
-
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug(
+                        "Sending PeerAddressResolved for peer {} with address {} to {}",
+                        peerId, peerAddress, actor.path());
+                }
                 actor
                     .tell(new PeerAddressResolved(peerId, peerAddress),
                         getSelf());
@@ -371,6 +436,18 @@ public class ShardManager extends AbstractUntypedActorWithMetering {
             return new ShardManager(type, cluster, configuration, datastoreContext);
         }
     }
+
+    static class SchemaContextModules implements Serializable {
+        private final Set<String> modules;
+
+        SchemaContextModules(Set<String> modules){
+            this.modules = modules;
+        }
+
+        public Set<String> getModules() {
+            return modules;
+        }
+    }
 }
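
The updateSchemaContext logic above persists and distributes a new SchemaContext only when its module namespaces form a superset of the previously persisted set. A minimal illustration of the containsAll check, with hypothetical namespace strings:

    Set<String> knownModules = new HashSet<>(Arrays.asList("urn:ns:a", "urn:ns:b"));
    Set<String> newModules = new HashSet<>(Arrays.asList("urn:ns:a", "urn:ns:b", "urn:ns:c"));

    if (newModules.containsAll(knownModules)) {
        // Superset: persist SchemaContextModules, then create local shards
        // or forward the new context to the existing ones.
    } else {
        // A module disappeared: reject the update to avoid losing data coverage.
    }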
 
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardRecoveryCoordinator.java
new file mode 100644 (file)
index 0000000..8afdb4c
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+/**
+ * Coordinates persistence recovery of journal log entries and snapshots for a shard. Each snapshot
+ * and each batch of journal log entries is de-serialized and applied to its own write transaction
+ * instance in parallel on a thread pool for faster recovery. However, the transactions are
+ * committed to the data store in the order in which the corresponding snapshot or log batch was
+ * received, to preserve data store integrity.
+ *
+ * @author Thomas Pantelis
+ */
+class ShardRecoveryCoordinator {
+
+    private static final int TIME_OUT = 10;
+
+    private static final Logger LOG = LoggerFactory.getLogger(ShardRecoveryCoordinator.class);
+
+    private final List<DOMStoreWriteTransaction> resultingTxList = Lists.newArrayList();
+    private final SchemaContext schemaContext;
+    private final String shardName;
+    private final ExecutorService executor;
+
+    ShardRecoveryCoordinator(String shardName, SchemaContext schemaContext) {
+        this.schemaContext = schemaContext;
+        this.shardName = shardName;
+
+        executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(),
+                new ThreadFactoryBuilder().setDaemon(true)
+                        .setNameFormat("ShardRecovery-" + shardName + "-%d").build());
+    }
+
+    /**
+     * Submits a batch of journal log entries.
+     *
+     * @param logEntries the serialized journal log entries
+     * @param resultingTx the write Tx to which to apply the entries
+     */
+    void submit(List<Object> logEntries, DOMStoreWriteTransaction resultingTx) {
+        LogRecoveryTask task = new LogRecoveryTask(logEntries, resultingTx);
+        resultingTxList.add(resultingTx);
+        executor.execute(task);
+    }
+
+    /**
+     * Submits a snapshot.
+     *
+     * @param snapshot the serialized snapshot
+     * @param resultingTx the write Tx to which to apply the snapshot
+     */
+    void submit(ByteString snapshot, DOMStoreWriteTransaction resultingTx) {
+        SnapshotRecoveryTask task = new SnapshotRecoveryTask(snapshot, resultingTx);
+        resultingTxList.add(resultingTx);
+        executor.execute(task);
+    }
+
+    Collection<DOMStoreWriteTransaction> getTransactions() {
+        // Shutdown the executor and wait for task completion.
+        executor.shutdown();
+
+        try {
+            if(executor.awaitTermination(TIME_OUT, TimeUnit.MINUTES)) {
+                return resultingTxList;
+            } else {
+                LOG.error("Recovery for shard {} timed out after {} minutes", shardName, TIME_OUT);
+            }
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+        }
+
+        return Collections.emptyList();
+    }
+
+    private static abstract class ShardRecoveryTask implements Runnable {
+
+        final DOMStoreWriteTransaction resultingTx;
+
+        ShardRecoveryTask(DOMStoreWriteTransaction resultingTx) {
+            this.resultingTx = resultingTx;
+        }
+    }
+
+    private class LogRecoveryTask extends ShardRecoveryTask {
+
+        private final List<Object> logEntries;
+
+        LogRecoveryTask(List<Object> logEntries, DOMStoreWriteTransaction resultingTx) {
+            super(resultingTx);
+            this.logEntries = logEntries;
+        }
+
+        @Override
+        public void run() {
+            for(int i = 0; i < logEntries.size(); i++) {
+                MutableCompositeModification.fromSerializable(
+                        logEntries.get(i), schemaContext).apply(resultingTx);
+                // Null out to GC quicker.
+                logEntries.set(i, null);
+            }
+        }
+    }
+
+    private class SnapshotRecoveryTask extends ShardRecoveryTask {
+
+        private final ByteString snapshot;
+
+        SnapshotRecoveryTask(ByteString snapshot, DOMStoreWriteTransaction resultingTx) {
+            super(resultingTx);
+            this.snapshot = snapshot;
+        }
+
+        @Override
+        public void run() {
+            try {
+                NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(snapshot);
+                NormalizedNode<?, ?> node = new NormalizedNodeToNodeCodec(schemaContext).decode(
+                        YangInstanceIdentifier.builder().build(), serializedNode);
+
+                // delete everything first
+                resultingTx.delete(YangInstanceIdentifier.builder().build());
+
+                // Add everything from the remote node back
+                resultingTx.write(YangInstanceIdentifier.builder().build(), node);
+            } catch (InvalidProtocolBufferException e) {
+                LOG.error("Error deserializing snapshot", e);
+            }
+        }
+    }
+}
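
A usage sketch mirroring how Shard drives the coordinator during recovery (persistenceId(), schemaContext, store and syncCommitTransaction are Shard members, borrowed here for illustration):

    ShardRecoveryCoordinator coordinator =
            new ShardRecoveryCoordinator(persistenceId(), schemaContext);

    coordinator.submit(snapshotBytes, store.newWriteOnlyTransaction()); // decoded on the pool
    coordinator.submit(logEntryBatch, store.newWriteOnlyTransaction()); // one Tx per batch

    // Blocks until the pool drains, then returns the transactions in submission order:
    for (DOMStoreWriteTransaction tx : coordinator.getTransactions()) {
        syncCommitTransaction(tx); // commit sequentially to preserve ordering
    }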
index b810ed9575a7af8d1f85a7b337639e2f3dfc72ca..f5ca6e3c5aa2334eb26c52408dd7279f08e33d3a 100644 (file)
@@ -105,7 +105,9 @@ public abstract class ShardTransaction extends AbstractUntypedActor {
             getSender().tell(new GetCompositeModificationReply(
                     new ImmutableCompositeModification(modification)), getSelf());
         } else if (message instanceof ReceiveTimeout) {
-            LOG.debug("Got ReceiveTimeout for inactivity - closing Tx");
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Got ReceiveTimeout for inactivity - closing Tx");
+            }
             closeTransaction(false);
         } else {
             throw new UnknownMessageException(message);
@@ -163,8 +165,9 @@ public abstract class ShardTransaction extends AbstractUntypedActor {
     protected void writeData(DOMStoreWriteTransaction transaction, WriteData message) {
         modification.addModification(
                 new WriteModification(message.getPath(), message.getData(),schemaContext));
-        LOG.debug("writeData at path : " + message.getPath().toString());
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("writeData at path : " + message.getPath().toString());
+        }
         try {
             transaction.write(message.getPath(), message.getData());
             getSender().tell(new WriteDataReply().toSerializable(), getSelf());
@@ -176,7 +179,9 @@ public abstract class ShardTransaction extends AbstractUntypedActor {
     protected void mergeData(DOMStoreWriteTransaction transaction, MergeData message) {
         modification.addModification(
                 new MergeModification(message.getPath(), message.getData(), schemaContext));
-        LOG.debug("mergeData at path : " + message.getPath().toString());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("mergeData at path : " + message.getPath().toString());
+        }
         try {
             transaction.merge(message.getPath(), message.getData());
             getSender().tell(new MergeDataReply().toSerializable(), getSelf());
@@ -186,7 +191,9 @@ public abstract class ShardTransaction extends AbstractUntypedActor {
     }
 
     protected void deleteData(DOMStoreWriteTransaction transaction, DeleteData message) {
-        LOG.debug("deleteData at path : " + message.getPath().toString());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("deleteData at path : " + message.getPath().toString());
+        }
         modification.addModification(new DeleteModification(message.getPath()));
         try {
             transaction.delete(message.getPath());
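
Each mutating message handler above follows the same three-step shape; a condensed sketch of the writeData case, with names taken from the hunk:

    // 1. Record the change so it can later be replicated and recovered.
    modification.addModification(
            new WriteModification(message.getPath(), message.getData(), schemaContext));
    // 2. Apply it to the local write transaction.
    transaction.write(message.getPath(), message.getData());
    // 3. Acknowledge to the caller.
    getSender().tell(new WriteDataReply().toSerializable(), getSelf());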
index e6ac7f8dbc368665d9cb13d623e531533f51e04a..0c3d33a78c4801002571e2b5dc0ab02f8ddae971 100644 (file)
@@ -25,7 +25,9 @@ public class TerminationMonitor extends UntypedActor{
     @Override public void onReceive(Object message) throws Exception {
         if(message instanceof Terminated){
             Terminated terminated = (Terminated) message;
-            LOG.debug("Actor terminated : {}", terminated.actor());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Actor terminated : {}", terminated.actor());
+            }
         } else if(message instanceof Monitor){
             Monitor monitor = (Monitor) message;
             getContext().watch(monitor.getActorRef());
index e3ae5dac7b7950f3fc300a3189bc83922df24033..df85bb136a93b51084676a39cb1d7b53b54b7037 100644 (file)
@@ -101,7 +101,9 @@ public class ThreePhaseCommitCohort extends AbstractUntypedActor {
 
     private void commit(CommitTransaction message) {
         // Forward the commit to the shard
-        log.debug("Forward commit transaction to Shard {} ", shardActor);
+        if(log.isDebugEnabled()) {
+            log.debug("Forward commit transaction to Shard {} ", shardActor);
+        }
         shardActor.forward(new ForwardedCommitTransaction(cohort, modification),
             getContext());
 
index a5be69531d73ede89f7bba5978a85d2e045d8989..a7a5b31b174e4e0d03db192aa367a43ebe67ad62 100644 (file)
@@ -65,9 +65,10 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
             @Override
             public Void apply(Iterable<ActorPath> paths) {
                 cohortPaths = Lists.newArrayList(paths);
-
-                LOG.debug("Tx {} successfully built cohort path list: {}",
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Tx {} successfully built cohort path list: {}",
                         transactionId, cohortPaths);
+                }
                 return null;
             }
         }, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getActorSystem().dispatcher());
@@ -75,8 +76,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
 
     @Override
     public ListenableFuture<Boolean> canCommit() {
-        LOG.debug("Tx {} canCommit", transactionId);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} canCommit", transactionId);
+        }
         final SettableFuture<Boolean> returnFuture = SettableFuture.create();
 
         // The first phase of canCommit is to gather the list of cohort actor paths that will
@@ -89,7 +91,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
             @Override
             public void onComplete(Throwable failure, Void notUsed) throws Throwable {
                 if(failure != null) {
-                    LOG.debug("Tx {}: a cohort path Future failed: {}", transactionId, failure);
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("Tx {}: a cohort path Future failed: {}", transactionId, failure);
+                    }
                     returnFuture.setException(failure);
                 } else {
                     finishCanCommit(returnFuture);
@@ -101,9 +105,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
     }
 
     private void finishCanCommit(final SettableFuture<Boolean> returnFuture) {
-
-        LOG.debug("Tx {} finishCanCommit", transactionId);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} finishCanCommit", transactionId);
+        }
         // The last phase of canCommit is to invoke all the cohort actors asynchronously to perform
         // their canCommit processing. If any one fails then we'll fail canCommit.
 
@@ -114,7 +118,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
             @Override
             public void onComplete(Throwable failure, Iterable<Object> responses) throws Throwable {
                 if(failure != null) {
-                    LOG.debug("Tx {}: a canCommit cohort Future failed: {}", transactionId, failure);
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("Tx {}: a canCommit cohort Future failed: {}", transactionId, failure);
+                    }
                     returnFuture.setException(failure);
                     return;
                 }
@@ -135,9 +141,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
                         return;
                     }
                 }
-
-                LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result);
-
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result);
+                }
                 returnFuture.set(Boolean.valueOf(result));
             }
         }, actorContext.getActorSystem().dispatcher());
@@ -146,9 +152,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
     private Future<Iterable<Object>> invokeCohorts(Object message) {
         List<Future<Object>> futureList = Lists.newArrayListWithCapacity(cohortPaths.size());
         for(ActorPath actorPath : cohortPaths) {
-
-            LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, actorPath);
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, actorPath);
+            }
             ActorSelection cohort = actorContext.actorSelection(actorPath);
 
             futureList.add(actorContext.executeRemoteOperationAsync(cohort, message));
@@ -184,8 +190,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
     private ListenableFuture<Void> voidOperation(final String operationName, final Object message,
             final Class<?> expectedResponseClass, final boolean propagateException) {
 
-        LOG.debug("Tx {} {}", transactionId, operationName);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} {}", transactionId, operationName);
+        }
         final SettableFuture<Void> returnFuture = SettableFuture.create();
 
         // The cohort actor list should already be built at this point by the canCommit phase but,
@@ -199,9 +206,10 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
                 @Override
                 public void onComplete(Throwable failure, Void notUsed) throws Throwable {
                     if(failure != null) {
-                        LOG.debug("Tx {}: a {} cohort path Future failed: {}", transactionId,
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug("Tx {}: a {} cohort path Future failed: {}", transactionId,
                                 operationName, failure);
-
+                        }
                         if(propagateException) {
                             returnFuture.setException(failure);
                         } else {
@@ -221,9 +229,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
     private void finishVoidOperation(final String operationName, final Object message,
             final Class<?> expectedResponseClass, final boolean propagateException,
             final SettableFuture<Void> returnFuture) {
-
-        LOG.debug("Tx {} finish {}", transactionId, operationName);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} finish {}", transactionId, operationName);
+        }
         Future<Iterable<Object>> combinedFuture = invokeCohorts(message);
 
         combinedFuture.onComplete(new OnComplete<Iterable<Object>>() {
@@ -243,9 +251,10 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
                 }
 
                 if(exceptionToPropagate != null) {
-                    LOG.debug("Tx {}: a {} cohort Future failed: {}", transactionId,
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("Tx {}: a {} cohort Future failed: {}", transactionId,
                             operationName, exceptionToPropagate);
-
+                    }
                     if(propagateException) {
                         // We don't log the exception here to avoid redundant logging since we're
                         // propagating to the caller in MD-SAL core who will log it.
@@ -254,12 +263,16 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
                         // Since the caller doesn't want us to propagate the exception we'll also
                         // not log it normally. But it's usually not good to totally silence
                         // exceptions so we'll log it to debug level.
-                        LOG.debug(String.format("%s failed",  message.getClass().getSimpleName()),
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug(String.format("%s failed", message.getClass().getSimpleName()),
                                 exceptionToPropagate);
+                        }
                         returnFuture.set(null);
                     }
                 } else {
-                    LOG.debug("Tx {}: {} succeeded", transactionId, operationName);
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("Tx {}: {} succeeded", transactionId, operationName);
+                    }
                     returnFuture.set(null);
                 }
             }
index 97a9ff0bf379ef3b8f6568ed37603ed285ba35a5..6cf16b44268c6c16e26e0658632f61994ee33971 100644 (file)
@@ -224,8 +224,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
                 new TransactionProxyCleanupPhantomReference(this);
             phantomReferenceCache.put(cleanup, cleanup);
         }
-
-        LOG.debug("Created txn {} of type {}", identifier, transactionType);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Created txn {} of type {}", identifier, transactionType);
+        }
     }
 
     @Override
@@ -235,8 +236,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
                 "Read operation on write-only transaction is not allowed");
 
-        LOG.debug("Tx {} read {}", identifier, path);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} read {}", identifier, path);
+        }
         createTransactionIfMissing(actorContext, path);
 
         return transactionContext(path).readData(path);
@@ -248,8 +250,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
                 "Exists operation on write-only transaction is not allowed");
 
-        LOG.debug("Tx {} exists {}", identifier, path);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} exists {}", identifier, path);
+        }
         createTransactionIfMissing(actorContext, path);
 
         return transactionContext(path).dataExists(path);
@@ -267,8 +270,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         checkModificationState();
 
-        LOG.debug("Tx {} write {}", identifier, path);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} write {}", identifier, path);
+        }
         createTransactionIfMissing(actorContext, path);
 
         transactionContext(path).writeData(path, data);
@@ -279,8 +283,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         checkModificationState();
 
-        LOG.debug("Tx {} merge {}", identifier, path);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} merge {}", identifier, path);
+        }
         createTransactionIfMissing(actorContext, path);
 
         transactionContext(path).mergeData(path, data);
@@ -290,9 +295,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
     public void delete(YangInstanceIdentifier path) {
 
         checkModificationState();
-
-        LOG.debug("Tx {} delete {}", identifier, path);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} delete {}", identifier, path);
+        }
         createTransactionIfMissing(actorContext, path);
 
         transactionContext(path).deleteData(path);
@@ -305,16 +310,18 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         inReadyState = true;
 
-        LOG.debug("Tx {} Trying to get {} transactions ready for commit", identifier,
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} Trying to get {} transactions ready for commit", identifier,
                 remoteTransactionPaths.size());
-
+        }
         List<Future<ActorPath>> cohortPathFutures = Lists.newArrayList();
 
         for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
 
-            LOG.debug("Tx {} Readying transaction for shard {}", identifier,
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} Readying transaction for shard {}", identifier,
                     transactionContext.getShardName());
-
+            }
             cohortPathFutures.add(transactionContext.readyTransaction());
         }
 
@@ -381,8 +388,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
                 String transactionPath = reply.getTransactionPath();
 
-                LOG.debug("Tx {} Received transaction path = {}", identifier, transactionPath);
-
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Tx {} Received transaction path = {}", identifier, transactionPath);
+                }
                 ActorSelection transactionActor = actorContext.actorSelection(transactionPath);
 
                 if (transactionType == TransactionType.READ_ONLY) {
@@ -404,7 +412,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
                     "Invalid reply type {} for CreateTransaction", response.getClass()));
             }
         } catch (Exception e) {
-            LOG.debug("Tx {} Creating NoOpTransaction because of : {}", identifier, e.getMessage());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} Creating NoOpTransaction because of : {}", identifier, e.getMessage());
+            }
             remoteTransactionPaths
                 .put(shardName, new NoOpTransactionContext(shardName, e, identifier));
         }
@@ -489,15 +499,18 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         @Override
         public void closeTransaction() {
-            LOG.debug("Tx {} closeTransaction called", identifier);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} closeTransaction called", identifier);
+            }
             actorContext.sendRemoteOperationAsync(getActor(), new CloseTransaction().toSerializable());
         }
 
         @Override
         public Future<ActorPath> readyTransaction() {
-            LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
                     identifier, recordedOperationFutures.size());
-
+            }
             // Send the ReadyTransaction message to the Tx actor.
 
             final Future<Object> replyFuture = actorContext.executeRemoteOperationAsync(getActor(),
@@ -522,10 +535,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             return combinedFutures.transform(new AbstractFunction1<Iterable<Object>, ActorPath>() {
                 @Override
                 public ActorPath apply(Iterable<Object> notUsed) {
-
-                    LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
                             identifier);
-
+                    }
                     // At this point all the Futures succeeded and we need to extract the cohort
                     // actor path from the ReadyTransactionReply. For the recorded operations, they
                     // don't return any data so we're only interested that they completed
@@ -543,9 +556,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
                         String resolvedCohortPath = getResolvedCohortPath(
                                 reply.getCohortPath().toString());
 
-                        LOG.debug("Tx {} readyTransaction: resolved cohort path {}",
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug("Tx {} readyTransaction: resolved cohort path {}",
                                 identifier, resolvedCohortPath);
-
+                        }
                         return actorContext.actorFor(resolvedCohortPath);
                     } else {
                         // Throwing an exception here will fail the Future.
@@ -559,21 +573,27 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         @Override
         public void deleteData(YangInstanceIdentifier path) {
-            LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+            }
             recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
                     new DeleteData(path).toSerializable() ));
         }
 
         @Override
         public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-            LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+            }
             recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
                     new MergeData(path, data, schemaContext).toSerializable()));
         }
 
         @Override
         public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-            LOG.debug("Tx {} writeData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} writeData called path = {}", identifier, path);
+            }
             recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
                     new WriteData(path, data, schemaContext).toSerializable()));
         }
@@ -582,8 +602,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
                 final YangInstanceIdentifier path) {
 
-            LOG.debug("Tx {} readData called path = {}", identifier, path);
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} readData called path = {}", identifier, path);
+            }
             final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture = SettableFuture.create();
 
             // If there were any previous recorded put/merge/delete operation reply Futures then we
@@ -593,9 +614,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             if(recordedOperationFutures.isEmpty()) {
                 finishReadData(path, returnFuture);
             } else {
-                LOG.debug("Tx {} readData: verifying {} previous recorded operations",
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Tx {} readData: verifying {} previous recorded operations",
                         identifier, recordedOperationFutures.size());
-
+                }
                 // Note: we make a copy of recordedOperationFutures to be on the safe side in case
                 // Futures#sequence accesses the passed List on a different thread, as
                 // recordedOperationFutures is not synchronized.
@@ -608,9 +630,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
                     public void onComplete(Throwable failure, Iterable<Object> notUsed)
                             throws Throwable {
                         if(failure != null) {
-                            LOG.debug("Tx {} readData: a recorded operation failed: {}",
+                            if(LOG.isDebugEnabled()) {
+                                LOG.debug("Tx {} readData: a recorded operation failed: {}",
                                     identifier, failure);
-
+                            }
                             returnFuture.setException(new ReadFailedException(
                                     "The read could not be performed because a previous put, merge,"
                                     + "or delete operation failed", failure));
@@ -629,20 +652,23 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         private void finishReadData(final YangInstanceIdentifier path,
                 final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture) {
 
-            LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
+            }
             OnComplete<Object> onComplete = new OnComplete<Object>() {
                 @Override
                 public void onComplete(Throwable failure, Object readResponse) throws Throwable {
                     if(failure != null) {
-                        LOG.debug("Tx {} read operation failed: {}", identifier, failure);
-
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug("Tx {} read operation failed: {}", identifier, failure);
+                        }
                         returnFuture.setException(new ReadFailedException(
                                 "Error reading data for path " + path, failure));
 
                     } else {
-                        LOG.debug("Tx {} read operation succeeded", identifier, failure);
-
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug("Tx {} read operation succeeded", identifier, failure);
+                        }
                         if (readResponse.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
                             ReadDataReply reply = ReadDataReply.fromSerializable(schemaContext,
                                     path, readResponse);
@@ -669,8 +695,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         public CheckedFuture<Boolean, ReadFailedException> dataExists(
                 final YangInstanceIdentifier path) {
 
-            LOG.debug("Tx {} dataExists called path = {}", identifier, path);
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+            }
             final SettableFuture<Boolean> returnFuture = SettableFuture.create();
 
             // If there were any previous recorded put/merge/delete operation reply Futures then we
@@ -681,9 +708,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             if(recordedOperationFutures.isEmpty()) {
                 finishDataExists(path, returnFuture);
             } else {
-                LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
                         identifier, recordedOperationFutures.size());
-
+                }
                 // Note: we make a copy of recordedOperationFutures to be on the safe side in case
                 // Futures#sequence accesses the passed List on a different thread, as
                 // recordedOperationFutures is not synchronized.
@@ -696,9 +724,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
                     public void onComplete(Throwable failure, Iterable<Object> notUsed)
                             throws Throwable {
                         if(failure != null) {
-                            LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
+                            if(LOG.isDebugEnabled()) {
+                                LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
                                     identifier, failure);
-
+                            }
                             returnFuture.setException(new ReadFailedException(
                                     "The data exists could not be performed because a previous "
                                     + "put, merge, or delete operation failed", failure));
@@ -717,19 +746,22 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         private void finishDataExists(final YangInstanceIdentifier path,
                 final SettableFuture<Boolean> returnFuture) {
 
-            LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
+            }
             OnComplete<Object> onComplete = new OnComplete<Object>() {
                 @Override
                 public void onComplete(Throwable failure, Object response) throws Throwable {
                     if(failure != null) {
-                        LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
-
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
+                        }
                         returnFuture.setException(new ReadFailedException(
                                 "Error checking data exists for path " + path, failure));
                     } else {
-                        LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
-
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
+                        }
                         if (response.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
                             returnFuture.set(Boolean.valueOf(DataExistsReply.
                                         fromSerializable(response).exists()));
@@ -761,34 +793,46 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         @Override
         public void closeTransaction() {
-            LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
+            }
         }
 
         @Override
         public Future<ActorPath> readyTransaction() {
-            LOG.debug("Tx {} readyTransaction called", identifier);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} readyTransaction called", identifier);
+            }
             return akka.dispatch.Futures.failed(failure);
         }
 
         @Override
         public void deleteData(YangInstanceIdentifier path) {
-            LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+            }
         }
 
         @Override
         public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-            LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+            }
         }
 
         @Override
         public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-            LOG.debug("Tx {} writeData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} writeData called path = {}", identifier, path);
+            }
         }
 
         @Override
         public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
             YangInstanceIdentifier path) {
-            LOG.debug("Tx {} readData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} readData called path = {}", identifier, path);
+            }
             return Futures.immediateFailedCheckedFuture(new ReadFailedException(
                     "Error reading data for path " + path, failure));
         }
@@ -796,7 +840,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         @Override
         public CheckedFuture<Boolean, ReadFailedException> dataExists(
             YangInstanceIdentifier path) {
-            LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+            }
             return Futures.immediateFailedCheckedFuture(new ReadFailedException(
                     "Error checking exists for path " + path, failure));
         }
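
The guard added throughout TransactionProxy follows the usual SLF4J idiom: the parameterized LOG.debug(...) call already defers message formatting until debug is enabled, so the explicit isDebugEnabled() check only pays off when building the arguments is itself non-trivial (an explicit toString(), or the varargs array allocated for calls with more than two arguments). A minimal sketch of the trade-off, assuming an SLF4J Logger named LOG:

    // Cheap arguments: the guard saves at most a varargs allocation.
    if (LOG.isDebugEnabled()) {
        LOG.debug("Tx {} read {}", identifier, path);
    }

    // Computed arguments: without the guard, getClass().toString() and
    // actor.toString() run even when debug logging is off.
    if (LOG.isDebugEnabled()) {
        LOG.debug("Sending remote message {} to {}",
                message.getClass().toString(), actor.toString());
    }
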
index 74a91d08cf4373918f069cc3cae44b665c146a9d..0959c2a95949a6332fd66859f0796e103746c5d8 100644 (file)
@@ -74,16 +74,14 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
     }
 
     public void setDataStoreExecutor(ExecutorService dsExecutor) {
-        this.dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dsExecutor,
-                "notification-executor", getMBeanType(), getMBeanCategory());
+        this.dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dsExecutor);
     }
 
     public void setNotificationManager(QueuedNotificationManager<?, ?> manager) {
         this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
                 "notification-manager", getMBeanType(), getMBeanCategory());
 
-        this.notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor(),
-                "data-store-executor", getMBeanType(), getMBeanCategory());
+        this.notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor());
     }
 
     @Override
@@ -230,7 +228,8 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
 
     @Override
     public ThreadExecutorStats getDataStoreExecutorStats() {
-        return dataStoreExecutorStatsBean.toThreadExecutorStats();
+        return dataStoreExecutorStatsBean == null ? null :
+                                        dataStoreExecutorStatsBean.toThreadExecutorStats();
     }
 
     @Override
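
The switch to the single-argument ThreadExecutorStatsMXBeanImpl.create(executor) overload explains the null check added to getDataStoreExecutorStats(): the factory may have no statistics to expose for a given executor type, in which case the bean presumably stays null (an assumption about the factory's contract; the getter above is written to tolerate it either way). Callers should be prepared for a null result, as in this minimal sketch (shardStats and report are hypothetical names, not part of this patch):

    // Sketch: consume the possibly-null stats defensively.
    ThreadExecutorStats stats = shardStats.getDataStoreExecutorStats();
    if (stats != null) {
        report(stats); // 'report' is a hypothetical sink for the metrics
    }
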
index 7b5588cb196a66fa68de947fecc58137977275ae..8ba333d2799a5177c7b1b8b5b09ab6b4ec87d126 100644 (file)
@@ -125,8 +125,9 @@ public class ActorContext {
         if (result instanceof LocalShardFound) {
             LocalShardFound found = (LocalShardFound) result;
 
-            LOG.debug("Local shard found {}", found.getPath());
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Local shard found {}", found.getPath());
+            }
             return found.getPath();
         }
 
@@ -141,8 +142,9 @@ public class ActorContext {
         if (result.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
             PrimaryFound found = PrimaryFound.fromSerializable(result);
 
-            LOG.debug("Primary found {}", found.getPrimaryPath());
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Primary found {}", found.getPrimaryPath());
+            }
             return found.getPrimaryPath();
         }
         throw new PrimaryNotFoundException("Could not find primary for shardName " + shardName);
@@ -175,9 +177,10 @@ public class ActorContext {
      */
     public Object executeRemoteOperation(ActorSelection actor, Object message) {
 
-        LOG.debug("Sending remote message {} to {}", message.getClass().toString(),
-            actor.toString());
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Sending remote message {} to {}", message.getClass().toString(),
+                actor.toString());
+        }
         Future<Object> future = ask(actor, message, operationTimeout);
 
         try {
@@ -197,8 +200,9 @@ public class ActorContext {
      */
     public Future<Object> executeRemoteOperationAsync(ActorSelection actor, Object message) {
 
-        LOG.debug("Sending remote message {} to {}", message.getClass().toString(), actor.toString());
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Sending remote message {} to {}", message.getClass().toString(), actor.toString());
+        }
         return ask(actor, message, operationTimeout);
     }
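
Both executeRemoteOperation variants are thin wrappers over Akka's ask pattern: ask(...) hands back a scala.concurrent.Future that is completed with the reply, or failed (typically with an AskTimeoutException) once operationTimeout elapses. A minimal sketch of the blocking variant, assuming the Akka 2.x untyped Java API (akka.pattern.Patterns, akka.util.Timeout, scala.concurrent.Await):

    // Sketch: send a message and block for the reply or a timeout.
    Timeout timeout = new Timeout(Duration.create(5, TimeUnit.SECONDS));
    Future<Object> future = Patterns.ask(actor, message, timeout);
    Object reply = Await.result(future, timeout.duration()); // may throw on timeout
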
 
index e7a7aab406677c53713e33b92d04f0c3a03f29aa..84614bd7bb43a0bbf43f3fe055673f64fe2cd10d 100644 (file)
@@ -42,13 +42,16 @@ public class DistributedConfigDataStoreProviderModule extends
 
         DatastoreContext datastoreContext = new DatastoreContext("DistributedConfigDatastore",
                 InMemoryDOMDataStoreConfigProperties.create(
-                        props.getMaxShardDataChangeExecutorPoolSize().getValue(),
-                        props.getMaxShardDataChangeExecutorQueueSize().getValue(),
-                        props.getMaxShardDataChangeListenerQueueSize().getValue(),
-                        props.getMaxShardDataStoreExecutorQueueSize().getValue()),
+                        props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
+                        props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
+                        props.getMaxShardDataChangeListenerQueueSize().getValue().intValue(),
+                        props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue()),
                 Duration.create(props.getShardTransactionIdleTimeoutInMinutes().getValue(),
                         TimeUnit.MINUTES),
-                props.getOperationTimeoutInSeconds().getValue());
+                props.getOperationTimeoutInSeconds().getValue(),
+                props.getShardJournalRecoveryLogBatchSize().getValue().intValue(),
+                props.getShardSnapshotBatchCount().getValue().intValue(),
+                props.getShardHearbeatIntervalInMillis().getValue());
 
         return DistributedDataStoreFactory.createInstance("config", getConfigSchemaServiceDependency(),
                 datastoreContext, bundleContext);
index 814e6f606ac00bc311eb191c81497e46b56e2357..3183527eb032d287623c03e1a4fc57bca9252f2d 100644 (file)
@@ -42,13 +42,16 @@ public class DistributedOperationalDataStoreProviderModule extends
 
         DatastoreContext datastoreContext = new DatastoreContext("DistributedOperationalDatastore",
                 InMemoryDOMDataStoreConfigProperties.create(
-                        props.getMaxShardDataChangeExecutorPoolSize().getValue(),
-                        props.getMaxShardDataChangeExecutorQueueSize().getValue(),
-                        props.getMaxShardDataChangeListenerQueueSize().getValue(),
-                        props.getMaxShardDataStoreExecutorQueueSize().getValue()),
+                        props.getMaxShardDataChangeExecutorPoolSize().getValue().intValue(),
+                        props.getMaxShardDataChangeExecutorQueueSize().getValue().intValue(),
+                        props.getMaxShardDataChangeListenerQueueSize().getValue().intValue(),
+                        props.getMaxShardDataStoreExecutorQueueSize().getValue().intValue()),
                 Duration.create(props.getShardTransactionIdleTimeoutInMinutes().getValue(),
                         TimeUnit.MINUTES),
-                props.getOperationTimeoutInSeconds().getValue());
+                props.getOperationTimeoutInSeconds().getValue(),
+                props.getShardJournalRecoveryLogBatchSize().getValue().intValue(),
+                props.getShardSnapshotBatchCount().getValue().intValue(),
+                props.getShardHearbeatIntervalInMillis().getValue());
 
         return DistributedDataStoreFactory.createInstance("operational",
                 getOperationalSchemaServiceDependency(), datastoreContext, bundleContext);
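
In both provider modules the new .intValue() calls are forced by the model change below: the pool- and queue-size leaves were widened from uint16 to uint32, so the generated getters now return Long while InMemoryDOMDataStoreConfigProperties.create(...) still takes int. A hypothetical narrowing helper that fails fast instead of silently truncating (narrowToInt is illustrative, not part of this patch):

    // Hypothetical helper: narrow a generated Long property to int,
    // rejecting values that would not survive the conversion.
    private static int narrowToInt(Long value) {
        if (value == null || value > Integer.MAX_VALUE) {
            throw new IllegalArgumentException("Value out of int range: " + value);
        }
        return value.intValue();
    }
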
index e19a76703f69f61ec8df33decddf483cfc6e7192..af43f953ffb52ffdd24bd892fe60d8369a0d74da 100644 (file)
@@ -36,8 +36,8 @@ module distributed-datastore-provider {
                 config:java-name-prefix DistributedOperationalDataStoreProvider;
      }
 
-    typedef non-zero-uint16-type {
-        type uint16 {
+    typedef non-zero-uint32-type {
+        type uint32 {
             range "1..max";
         }
     }
@@ -48,43 +48,67 @@ module distributed-datastore-provider {
         }
     }
 
+    typedef heartbeat-interval-type {
+        type uint16 {
+            range "100..max";
+        }
+    }
+
     grouping data-store-properties {
         leaf max-shard-data-change-executor-queue-size {
             default 1000;
-            type non-zero-uint16-type;
+            type non-zero-uint32-type;
             description "The maximum queue size for each shard's data store data change notification executor.";
          }
 
          leaf max-shard-data-change-executor-pool-size {
             default 20;
-            type non-zero-uint16-type;
+            type non-zero-uint32-type;
             description "The maximum thread pool size for each shard's data store data change notification executor.";
          }
 
          leaf max-shard-data-change-listener-queue-size {
             default 1000;
-            type non-zero-uint16-type;
+            type non-zero-uint32-type;
             description "The maximum queue size for each shard's data store data change listeners.";
          }
 
          leaf max-shard-data-store-executor-queue-size {
             default 5000;
-            type non-zero-uint16-type;
+            type non-zero-uint32-type;
             description "The maximum queue size for each shard's data store executor.";
          }
 
          leaf shard-transaction-idle-timeout-in-minutes {
             default 10;
-            type non-zero-uint16-type;
+            type non-zero-uint32-type;
             description "The maximum amount of time a shard transaction can be idle without receiving any messages before it self-destructs.";
          }
 
+         leaf shard-snapshot-batch-count {
+            default 20000;
+            type non-zero-uint32-type;
+            description "The minimum number of entries to be present in the in-memory journal log before a snapshot to be taken.";
+         }
+
+         leaf shard-hearbeat-interval-in-millis {
+            default 500;
+            type heartbeat-interval-type;
+            description "The interval at which a shard will send a heart beat message to its remote shard.";
+         }
+
          leaf operation-timeout-in-seconds {
             default 5;
             type operation-timeout-type;
             description "The maximum amount of time for akka operations (remote or local) to complete before failing.";
          }
 
+         leaf shard-journal-recovery-log-batch-size {
+            default 5000;
+            type non-zero-uint32-type;
+            description "The maximum number of journal log entries to batch on recovery for a shard before committing to the data store.";
+         }
+
          leaf enable-metric-capture {
             default false;
             type boolean;
@@ -93,7 +117,7 @@ module distributed-datastore-provider {
 
          leaf bounded-mailbox-capacity {
              default 1000;
-             type non-zero-uint16-type;
+             type non-zero-uint32-type;
              description "Max queue size that an actor's mailbox can reach";
          }
     }
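
The new heartbeat-interval-type keeps uint16 but raises the floor to 100 ms, so pathologically small heartbeats are rejected at the model level. For code paths that construct the settings directly rather than through the generated config beans, an equivalent runtime guard might look like this minimal Java sketch (checkHeartbeatInterval is illustrative, not part of this patch; Preconditions is Guava's, already used elsewhere in this change):

    // Illustrative mirror of heartbeat-interval-type (uint16, range "100..max").
    static void checkHeartbeatInterval(int intervalMillis) {
        Preconditions.checkArgument(intervalMillis >= 100 && intervalMillis <= 65535,
                "shard-hearbeat-interval-in-millis must be in [100, 65535], got %s",
                intervalMillis);
    }
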
index 022ef9bbafef949921ec24041357cff64013ea12..fae21f27091a2382f3e1feabf7a3c26ce7004ba5 100644 (file)
@@ -10,11 +10,10 @@ package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorSystem;
 import akka.testkit.JavaTestKit;
-import org.apache.commons.io.FileUtils;
+
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
-import java.io.File;
 import java.io.IOException;
 
 public abstract class AbstractActorTest {
@@ -25,35 +24,15 @@ public abstract class AbstractActorTest {
 
         System.setProperty("shard.persistent", "false");
         system = ActorSystem.create("test");
-
-        deletePersistenceFiles();
     }
 
     @AfterClass
     public static void tearDownClass() throws IOException {
         JavaTestKit.shutdownActorSystem(system);
         system = null;
-
-        deletePersistenceFiles();
-    }
-
-    protected static void deletePersistenceFiles() throws IOException {
-        File journal = new File("journal");
-
-        if(journal.exists()) {
-            FileUtils.deleteDirectory(journal);
-        }
-
-        File snapshots = new File("snapshots");
-
-        if(snapshots.exists()){
-            FileUtils.deleteDirectory(snapshots);
-        }
-
     }
 
     protected ActorSystem getSystem() {
         return system;
     }
-
 }
index 02201f7cd1672d2c8d02415ae4d65d8b71432f8e..8a3cdd0c8aa3b9890811c8a52318c8c18051d7b8 100644 (file)
@@ -3,10 +3,23 @@ package org.opendaylight.controller.cluster.datastore;
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.Props;
+import akka.dispatch.Futures;
+import akka.japi.Procedure;
+import akka.persistence.PersistentConfirmation;
+import akka.persistence.PersistentId;
+import akka.persistence.PersistentImpl;
+import akka.persistence.PersistentRepr;
+import akka.persistence.journal.japi.AsyncWriteJournal;
 import akka.testkit.JavaTestKit;
 import akka.testkit.TestActorRef;
-import junit.framework.Assert;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Maps;
+import com.google.common.util.concurrent.Uninterruptibles;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import com.typesafe.config.ConfigValueFactory;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
@@ -19,17 +32,41 @@ import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContex
 import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
 import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import scala.concurrent.duration.Duration;
+import org.opendaylight.yangtools.yang.model.api.ModuleIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.concurrent.Future;
+
+import java.net.URI;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
 
 import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 public class ShardManagerTest {
     private static ActorSystem system;
 
     @BeforeClass
-    public static void setUp() {
-        system = ActorSystem.create("test");
+    public static void setUpClass() {
+        Map<String, String> myJournal = new HashMap<>();
+        myJournal.put("class", "org.opendaylight.controller.cluster.datastore.ShardManagerTest$MyJournal");
+        myJournal.put("plugin-dispatcher", "akka.actor.default-dispatcher");
+        Config config = ConfigFactory.load()
+            .withValue("akka.persistence.journal.plugin",
+                ConfigValueFactory.fromAnyRef("my-journal"))
+            .withValue("my-journal", ConfigValueFactory.fromMap(myJournal));
+
+        MyJournal.clear();
+
+        system = ActorSystem.create("test", config);
     }
 
     @AfterClass
@@ -38,29 +75,27 @@ public class ShardManagerTest {
         system = null;
     }
 
+    @Before
+    public void setUpTest(){
+        MyJournal.clear();
+    }
+
     @Test
     public void testOnReceiveFindPrimaryForNonExistentShard() throws Exception {
 
-        new JavaTestKit(system) {{
-            final Props props = ShardManager
-                .props("config", new MockClusterWrapper(),
-                    new MockConfiguration(), new DatastoreContext());
-            final TestActorRef<ShardManager> subject =
-                TestActorRef.create(system, props);
+        new JavaTestKit(system) {
+            {
+                final Props props = ShardManager
+                    .props("config", new MockClusterWrapper(),
+                        new MockConfiguration(), new DatastoreContext());
 
-            new Within(duration("10 seconds")) {
-                @Override
-                protected void run() {
+                final ActorRef subject = getSystem().actorOf(props);
 
-                    subject.tell(new FindPrimary("inventory").toSerializable(), getRef());
+                subject.tell(new FindPrimary("inventory").toSerializable(), getRef());
 
-                    expectMsgEquals(Duration.Zero(),
-                        new PrimaryNotFound("inventory").toSerializable());
-
-                    expectNoMsg();
-                }
-            };
-        }};
+                expectMsgEquals(duration("2 seconds"),
+                    new PrimaryNotFound("inventory").toSerializable());
+            }};
     }
 
     @Test
@@ -70,22 +105,14 @@ public class ShardManagerTest {
             final Props props = ShardManager
                 .props("config", new MockClusterWrapper(),
                     new MockConfiguration(), new DatastoreContext());
-            final TestActorRef<ShardManager> subject =
-                TestActorRef.create(system, props);
-
-            subject.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
 
-            new Within(duration("10 seconds")) {
-                @Override
-                protected void run() {
+            final ActorRef subject = getSystem().actorOf(props);
 
-                    subject.tell(new FindPrimary(Shard.DEFAULT_NAME).toSerializable(), getRef());
+            subject.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
 
-                    expectMsgClass(duration("1 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
+            subject.tell(new FindPrimary(Shard.DEFAULT_NAME).toSerializable(), getRef());
 
-                    expectNoMsg();
-                }
-            };
+            expectMsgClass(duration("1 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
         }};
     }
 
@@ -96,31 +123,23 @@ public class ShardManagerTest {
             final Props props = ShardManager
                 .props("config", new MockClusterWrapper(),
                     new MockConfiguration(), new DatastoreContext());
-            final TestActorRef<ShardManager> subject =
-                TestActorRef.create(system, props);
 
-            new Within(duration("10 seconds")) {
-                @Override
-                protected void run() {
-
-                    subject.tell(new FindLocalShard("inventory"), getRef());
-
-                    final String out = new ExpectMsg<String>(duration("10 seconds"), "find local") {
-                        @Override
-                        protected String match(Object in) {
-                            if (in instanceof LocalShardNotFound) {
-                                return ((LocalShardNotFound) in).getShardName();
-                            } else {
-                                throw noMatch();
-                            }
-                        }
-                    }.get(); // this extracts the received message
+            final ActorRef subject = getSystem().actorOf(props);
 
-                    assertEquals("inventory", out);
+            subject.tell(new FindLocalShard("inventory"), getRef());
 
-                    expectNoMsg();
+            final String out = new ExpectMsg<String>(duration("3 seconds"), "find local") {
+                @Override
+                protected String match(Object in) {
+                    if (in instanceof LocalShardNotFound) {
+                        return ((LocalShardNotFound) in).getShardName();
+                    } else {
+                        throw noMatch();
+                    }
                 }
-            };
+            }.get(); // this extracts the received message
+
+            assertEquals("inventory", out);
         }};
     }
 
@@ -133,40 +152,109 @@ public class ShardManagerTest {
             final Props props = ShardManager
                 .props("config", mockClusterWrapper,
                     new MockConfiguration(), new DatastoreContext());
-            final TestActorRef<ShardManager> subject =
-                TestActorRef.create(system, props);
+
+            final ActorRef subject = getSystem().actorOf(props);
 
             subject.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
 
-            new Within(duration("10 seconds")) {
+            subject.tell(new FindLocalShard(Shard.DEFAULT_NAME), getRef());
+
+            final ActorRef out = new ExpectMsg<ActorRef>(duration("3 seconds"), "find local") {
                 @Override
-                protected void run() {
-
-                    subject.tell(new FindLocalShard(Shard.DEFAULT_NAME), getRef());
-
-                    final ActorRef out = new ExpectMsg<ActorRef>(duration("10 seconds"), "find local") {
-                        @Override
-                        protected ActorRef match(Object in) {
-                            if (in instanceof LocalShardFound) {
-                                return ((LocalShardFound) in).getPath();
-                            } else {
-                                throw noMatch();
-                            }
-                        }
-                    }.get(); // this extracts the received message
+                protected ActorRef match(Object in) {
+                    if (in instanceof LocalShardFound) {
+                        return ((LocalShardFound) in).getPath();
+                    } else {
+                        throw noMatch();
+                    }
+                }
+            }.get(); // this extracts the received message
+
+            assertTrue(out.path().toString(),
+                out.path().toString().contains("member-1-shard-default-config"));
+        }};
+    }
+
+    @Test
+    public void testOnReceiveMemberUp() throws Exception {
+
+        new JavaTestKit(system) {{
+            final Props props = ShardManager
+                .props("config", new MockClusterWrapper(),
+                    new MockConfiguration(), new DatastoreContext());
 
-                    assertTrue(out.path().toString(), out.path().toString().contains("member-1-shard-default-config"));
+            final ActorRef subject = getSystem().actorOf(props);
 
+            MockClusterWrapper.sendMemberUp(subject, "member-2", getRef().path().toString());
 
-                    expectNoMsg();
+            subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+
+            final String out = new ExpectMsg<String>(duration("3 seconds"), "primary found") {
+                // do not put code outside this method, will run afterwards
+                @Override
+                protected String match(Object in) {
+                    if (in.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
+                        PrimaryFound f = PrimaryFound.fromSerializable(in);
+                        return f.getPrimaryPath();
+                    } else {
+                        throw noMatch();
+                    }
                 }
-            };
+            }.get(); // this extracts the received message
+
+            assertTrue(out, out.contains("member-2-shard-astronauts-config"));
         }};
     }
 
     @Test
-    public void testOnReceiveMemberUp() throws Exception {
+    public void testOnReceiveMemberDown() throws Exception {
 
+        new JavaTestKit(system) {{
+            final Props props = ShardManager
+                .props("config", new MockClusterWrapper(),
+                    new MockConfiguration(), new DatastoreContext());
+
+            final ActorRef subject = getSystem().actorOf(props);
+
+            MockClusterWrapper.sendMemberUp(subject, "member-2", getRef().path().toString());
+
+            subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+
+            expectMsgClass(duration("3 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
+
+            MockClusterWrapper.sendMemberRemoved(subject, "member-2", getRef().path().toString());
+
+            subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+
+            expectMsgClass(duration("1 seconds"), PrimaryNotFound.SERIALIZABLE_CLASS);
+        }};
+    }
+
+    @Test
+    public void testOnRecoveryJournalIsEmptied(){
+        MyJournal.addToJournal(1L, new ShardManager.SchemaContextModules(
+            ImmutableSet.of("foo")));
+
+        assertEquals(1, MyJournal.get().size());
+
+        new JavaTestKit(system) {{
+            final Props props = ShardManager
+                .props("config", new MockClusterWrapper(),
+                    new MockConfiguration(), new DatastoreContext());
+
+            final ActorRef subject = getSystem().actorOf(props);
+
+            // Send message to check that ShardManager is ready
+            subject.tell(new FindPrimary("unknown").toSerializable(), getRef());
+
+            expectMsgClass(duration("3 seconds"), PrimaryNotFound.SERIALIZABLE_CLASS);
+
+            assertEquals(0, MyJournal.get().size());
+        }};
+    }
+
+    @Test
+    public void testOnRecoveryPreviouslyKnownModulesAreDiscovered() throws Exception {
         new JavaTestKit(system) {{
             final Props props = ShardManager
                 .props("config", new MockClusterWrapper(),
@@ -174,39 +262,63 @@ public class ShardManagerTest {
             final TestActorRef<ShardManager> subject =
                 TestActorRef.create(system, props);
 
-            // the run() method needs to finish within 3 seconds
-            new Within(duration("10 seconds")) {
-                @Override
-                protected void run() {
-
-                    MockClusterWrapper.sendMemberUp(subject, "member-2", getRef().path().toString());
-
-                    subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
-
-                    final String out = new ExpectMsg<String>(duration("1 seconds"), "primary found") {
-                        // do not put code outside this method, will run afterwards
-                        @Override
-                        protected String match(Object in) {
-                            if (in.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
-                                PrimaryFound f = PrimaryFound.fromSerializable(in);
-                                return f.getPrimaryPath();
-                            } else {
-                                throw noMatch();
-                            }
-                        }
-                    }.get(); // this extracts the received message
+            subject.underlyingActor().onReceiveRecover(new ShardManager.SchemaContextModules(ImmutableSet.of("foo")));
 
-                    Assert.assertTrue(out, out.contains("member-2-shard-astronauts-config"));
+            Collection<String> knownModules = subject.underlyingActor().getKnownModules();
 
-                    expectNoMsg();
-                }
-            };
+            assertTrue(knownModules.contains("foo"));
         }};
     }
 
     @Test
-    public void testOnReceiveMemberDown() throws Exception {
+    public void testOnUpdateSchemaContextUpdateKnownModulesIfTheyContainASuperSetOfTheKnownModules()
+        throws Exception {
+        new JavaTestKit(system) {{
+            final Props props = ShardManager
+                .props("config", new MockClusterWrapper(),
+                    new MockConfiguration(), new DatastoreContext());
+            final TestActorRef<ShardManager> subject =
+                TestActorRef.create(system, props);
+
+            Collection<String> knownModules = subject.underlyingActor().getKnownModules();
+
+            assertEquals(0, knownModules.size());
+
+            SchemaContext schemaContext = mock(SchemaContext.class);
+            Set<ModuleIdentifier> moduleIdentifierSet = new HashSet<>();
+
+            ModuleIdentifier foo = mock(ModuleIdentifier.class);
+            when(foo.getNamespace()).thenReturn(new URI("foo"));
+
+            moduleIdentifierSet.add(foo);
+
+            when(schemaContext.getAllModuleIdentifiers()).thenReturn(moduleIdentifierSet);
+
+            subject.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+            assertTrue(knownModules.contains("foo"));
+
+            assertEquals(1, knownModules.size());
+
+            ModuleIdentifier bar = mock(ModuleIdentifier.class);
+            when(bar.getNamespace()).thenReturn(new URI("bar"));
+
+            moduleIdentifierSet.add(bar);
+
+            subject.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+            assertTrue(knownModules.contains("bar"));
 
+            assertEquals(2, knownModules.size());
+
+        }};
+
+    }
+
+
+    @Test
+    public void testOnUpdateSchemaContextDoNotUpdateKnownModulesIfTheyDoNotContainASuperSetOfKnownModules()
+        throws Exception {
         new JavaTestKit(system) {{
             final Props props = ShardManager
                 .props("config", new MockClusterWrapper(),
@@ -214,28 +326,117 @@ public class ShardManagerTest {
             final TestActorRef<ShardManager> subject =
                 TestActorRef.create(system, props);
 
-            // the run() method needs to finish within 3 seconds
-            new Within(duration("10 seconds")) {
-                @Override
-                protected void run() {
+            Collection<String> knownModules = subject.underlyingActor().getKnownModules();
 
-                    MockClusterWrapper.sendMemberUp(subject, "member-2", getRef().path().toString());
+            assertEquals(0, knownModules.size());
 
-                    subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+            SchemaContext schemaContext = mock(SchemaContext.class);
+            Set<ModuleIdentifier> moduleIdentifierSet = new HashSet<>();
 
-                    expectMsgClass(duration("1 seconds"), PrimaryFound.SERIALIZABLE_CLASS);
+            ModuleIdentifier foo = mock(ModuleIdentifier.class);
+            when(foo.getNamespace()).thenReturn(new URI("foo"));
 
-                    MockClusterWrapper.sendMemberRemoved(subject, "member-2", getRef().path().toString());
+            moduleIdentifierSet.add(foo);
 
-                    subject.tell(new FindPrimary("astronauts").toSerializable(), getRef());
+            when(schemaContext.getAllModuleIdentifiers()).thenReturn(moduleIdentifierSet);
 
-                    expectMsgClass(duration("1 seconds"), PrimaryNotFound.SERIALIZABLE_CLASS);
+            subject.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+            assertTrue(knownModules.contains("foo"));
+
+            assertEquals(1, knownModules.size());
+
+            //Create a completely different SchemaContext with only the bar module in it
+            schemaContext = mock(SchemaContext.class);
+            moduleIdentifierSet = new HashSet<>();
+            ModuleIdentifier bar = mock(ModuleIdentifier.class);
+            when(bar.getNamespace()).thenReturn(new URI("bar"));
+
+            moduleIdentifierSet.add(bar);
+
+            subject.underlyingActor().onReceiveCommand(new UpdateSchemaContext(schemaContext));
+
+            assertFalse(knownModules.contains("bar"));
+
+            assertEquals(1, knownModules.size());
 
-                    expectNoMsg();
-                }
-            };
         }};
+
+    }
+
+
+    private void sleep(long period){
+        Uninterruptibles.sleepUninterruptibly(period, TimeUnit.MILLISECONDS);
     }
 
+    public static class MyJournal extends AsyncWriteJournal {
+
+        private static Map<Long, Object> journal = Maps.newTreeMap();
+
+        public static void addToJournal(Long sequenceNr, Object value){
+            journal.put(sequenceNr, value);
+        }
+
+        public static Map<Long, Object> get(){
+            return journal;
+        }
+
+        public static void clear(){
+            journal.clear();
+        }
 
+        @Override public Future<Void> doAsyncReplayMessages(final String persistenceId, long fromSequenceNr, long toSequenceNr, long max,
+            final Procedure<PersistentRepr> replayCallback) {
+            if(journal.size() == 0){
+                return Futures.successful(null);
+            }
+            return Futures.future(new Callable<Void>() {
+                @Override
+                public Void call() throws Exception {
+                    for (Map.Entry<Long, Object> entry : journal.entrySet()) {
+                        PersistentRepr persistentMessage =
+                            new PersistentImpl(entry.getValue(), entry.getKey(), persistenceId,
+                                false, null, null);
+                        replayCallback.apply(persistentMessage);
+                    }
+                    return null;
+                }
+            }, context().dispatcher());
+        }
+
+        @Override public Future<Long> doAsyncReadHighestSequenceNr(String s, long l) {
+            return Futures.successful(-1L);
+        }
+
+        @Override public Future<Void> doAsyncWriteMessages(
+            final Iterable<PersistentRepr> persistentReprs) {
+            return Futures.future(new Callable<Void>() {
+                @Override
+                public Void call() throws Exception {
+                    for (PersistentRepr repr : persistentReprs){
+                        if(repr.payload() instanceof ShardManager.SchemaContextModules) {
+                            journal.put(repr.sequenceNr(), repr.payload());
+                        }
+                    }
+                    return null;
+                }
+            }, context().dispatcher());
+        }
+
+        @Override public Future<Void> doAsyncWriteConfirmations(
+            Iterable<PersistentConfirmation> persistentConfirmations) {
+            return Futures.successful(null);
+        }
+
+        @Override public Future<Void> doAsyncDeleteMessages(Iterable<PersistentId> persistentIds,
+            boolean b) {
+            clear();
+            return Futures.successful(null);
+        }
+
+        @Override public Future<Void> doAsyncDeleteMessagesTo(String s, long l, boolean b) {
+            clear();
+            return Futures.successful(null);
+        }
+    }
 }
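
MyJournal's writes and deletes complete asynchronously on the actor system's dispatcher, which is presumably why the (apparently unused) sleep(long) helper is kept around. Where a test has to observe a journal write or deletion, a bounded polling loop is usually less flaky than a fixed sleep; a minimal sketch using the Uninterruptibles, Callable, TimeUnit and fail imports already present above (awaitCondition is a hypothetical helper, not part of this patch):

    // Hypothetical test helper: poll until the condition holds or time out.
    private static void awaitCondition(Callable<Boolean> condition, long timeoutMillis)
            throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (System.currentTimeMillis() < deadline) {
            if (condition.call()) {
                return;
            }
            Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
        }
        fail("Condition not met within " + timeoutMillis + " ms");
    }

For example, testOnRecoveryJournalIsEmptied could wait on MyJournal.get().isEmpty() rather than asserting immediately after expectMsgClass, since the recovery-time delete is not guaranteed to have landed by then.
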
index deb71c2df4aa9cc522904e4014ed66d536aa2fa4..a3e0b3a07d70993b5c403e7712dfeb1c3ad0c96d 100644 (file)
@@ -4,23 +4,43 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.actor.Props;
 import akka.event.Logging;
+import akka.japi.Creator;
 import akka.testkit.JavaTestKit;
 import akka.testkit.TestActorRef;
 import com.google.common.base.Optional;
 import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.MoreExecutors;
+import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.cluster.datastore.messages.ForwardedCommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
+import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
+import org.opendaylight.controller.cluster.datastore.modification.Modification;
+import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
+import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
 import org.opendaylight.controller.cluster.datastore.node.NormalizedNodeToNodeCodec;
+import org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal;
+import org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
+import org.opendaylight.controller.cluster.raft.Snapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelper;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
@@ -28,222 +48,138 @@ import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
 import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
 import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
 import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
-
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.concurrent.duration.Duration;
 import java.io.IOException;
 import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
-
+import java.util.concurrent.TimeUnit;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.verify;
 
 public class ShardTest extends AbstractActorTest {
 
-    private static final DatastoreContext DATA_STORE_CONTEXT = new DatastoreContext();
+    private static final DatastoreContext DATA_STORE_CONTEXT =
+            new DatastoreContext("", null, Duration.create(10, TimeUnit.MINUTES), 5, 3, 5000, 500);
 
-    @Test
-    public void testOnReceiveRegisterListener() throws Exception {
-        new JavaTestKit(getSystem()) {{
-            final ShardIdentifier identifier =
-                ShardIdentifier.builder().memberName("member-1")
-                    .shardName("inventory").type("config").build();
+    private static final SchemaContext SCHEMA_CONTEXT = TestModel.createTestContext();
 
-            final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
-            final ActorRef subject =
-                getSystem().actorOf(props, "testRegisterChangeListener");
+    private static final ShardIdentifier IDENTIFIER = ShardIdentifier.builder().memberName("member-1")
+            .shardName("inventory").type("config").build();
 
-            new Within(duration("3 seconds")) {
-                @Override
-                protected void run() {
+    @Before
+    public void setUp() {
+        System.setProperty("shard.persistent", "false");
 
-                    subject.tell(
-                        new UpdateSchemaContext(SchemaContextHelper.full()),
-                        getRef());
+        InMemorySnapshotStore.clear();
+        InMemoryJournal.clear();
+    }
 
-                    subject.tell(new RegisterChangeListener(TestModel.TEST_PATH,
-                        getRef().path(), AsyncDataBroker.DataChangeScope.BASE),
-                        getRef());
+    @After
+    public void tearDown() {
+        InMemorySnapshotStore.clear();
+        InMemoryJournal.clear();
+    }
 
-                    final Boolean notificationEnabled = new ExpectMsg<Boolean>(
-                                                   duration("3 seconds"), "enable notification") {
-                        // do not put code outside this method, will run afterwards
-                        @Override
-                        protected Boolean match(Object in) {
-                            if(in instanceof EnableNotification){
-                                return ((EnableNotification) in).isEnabled();
-                            } else {
-                                throw noMatch();
-                            }
-                        }
-                    }.get(); // this extracts the received message
-
-                    assertFalse(notificationEnabled);
-
-                    final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
-                        // do not put code outside this method, will run afterwards
-                        @Override
-                        protected String match(Object in) {
-                            if (in.getClass().equals(RegisterChangeListenerReply.class)) {
-                                RegisterChangeListenerReply reply =
-                                    (RegisterChangeListenerReply) in;
-                                return reply.getListenerRegistrationPath()
-                                    .toString();
-                            } else {
-                                throw noMatch();
-                            }
-                        }
-                    }.get(); // this extracts the received message
+    private Props newShardProps() {
+        return Shard.props(IDENTIFIER, Collections.<ShardIdentifier,String>emptyMap(),
+                DATA_STORE_CONTEXT, SCHEMA_CONTEXT);
+    }
 
-                    assertTrue(out.matches(
-                        "akka:\\/\\/test\\/user\\/testRegisterChangeListener\\/\\$.*"));
-                }
+    @Test
+    public void testOnReceiveRegisterListener() throws Exception {
+        new JavaTestKit(getSystem()) {{
+            ActorRef subject = getSystem().actorOf(newShardProps(), "testRegisterChangeListener");
 
+            subject.tell(new UpdateSchemaContext(SchemaContextHelper.full()), getRef());
 
-            };
+            subject.tell(new RegisterChangeListener(TestModel.TEST_PATH,
+                    getRef().path(), AsyncDataBroker.DataChangeScope.BASE), getRef());
+
+            EnableNotification enable = expectMsgClass(duration("3 seconds"), EnableNotification.class);
+            assertEquals("isEnabled", false, enable.isEnabled());
+
+            RegisterChangeListenerReply reply = expectMsgClass(duration("3 seconds"),
+                    RegisterChangeListenerReply.class);
+            assertTrue(reply.getListenerRegistrationPath().toString().matches(
+                    "akka:\\/\\/test\\/user\\/testRegisterChangeListener\\/\\$.*"));
         }};
     }
 
     @Test
     public void testCreateTransaction(){
-        new JavaTestKit(getSystem()) {{
-            final ShardIdentifier identifier =
-                ShardIdentifier.builder().memberName("member-1")
-                    .shardName("inventory").type("config").build();
-
-            final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
-            final ActorRef subject =
-                getSystem().actorOf(props, "testCreateTransaction");
-
-            // Wait for a specific log message to show up
-            final boolean result =
-                new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
-                ) {
-                    @Override
-                    protected Boolean run() {
-                        return true;
-                    }
-                }.from(subject.path().toString())
-                    .message("Switching from state Candidate to Leader")
-                    .occurrences(1).exec();
-
-            Assert.assertEquals(true, result);
+        new ShardTestKit(getSystem()) {{
+            ActorRef subject = getSystem().actorOf(newShardProps(), "testCreateTransaction");
 
-            new Within(duration("3 seconds")) {
-                @Override
-                protected void run() {
+            waitUntilLeader(subject);
 
-                    subject.tell(
-                        new UpdateSchemaContext(TestModel.createTestContext()),
-                        getRef());
+            subject.tell(new UpdateSchemaContext(TestModel.createTestContext()), getRef());
 
-                    subject.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.READ_ONLY.ordinal() ).toSerializable(),
-                        getRef());
+            subject.tell(new CreateTransaction("txn-1",
+                    TransactionProxy.TransactionType.READ_ONLY.ordinal()).toSerializable(), getRef());
 
-                    final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
-                        // do not put code outside this method, will run afterwards
-                        @Override
-                        protected String match(Object in) {
-                            if (in instanceof CreateTransactionReply) {
-                                CreateTransactionReply reply =
-                                    (CreateTransactionReply) in;
-                                return reply.getTransactionActorPath()
-                                    .toString();
-                            } else {
-                                throw noMatch();
-                            }
-                        }
-                    }.get(); // this extracts the received message
+            CreateTransactionReply reply = expectMsgClass(duration("3 seconds"),
+                    CreateTransactionReply.class);
 
-                    assertTrue("Unexpected transaction path " + out,
-                        out.contains("akka://test/user/testCreateTransaction/shard-txn-1"));
-                    expectNoMsg();
-                }
-            };
+            String path = reply.getTransactionActorPath().toString();
+            assertTrue("Unexpected transaction path " + path,
+                    path.contains("akka://test/user/testCreateTransaction/shard-txn-1"));
+            expectNoMsg();
         }};
     }
 
     @Test
     public void testCreateTransactionOnChain(){
-        new JavaTestKit(getSystem()) {{
-            final ShardIdentifier identifier =
-                ShardIdentifier.builder().memberName("member-1")
-                    .shardName("inventory").type("config").build();
-
-            final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
-            final ActorRef subject =
-                getSystem().actorOf(props, "testCreateTransactionOnChain");
-
-            // Wait for a specific log message to show up
-            final boolean result =
-                new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
-                ) {
-                    @Override
-                    protected Boolean run() {
-                        return true;
-                    }
-                }.from(subject.path().toString())
-                    .message("Switching from state Candidate to Leader")
-                    .occurrences(1).exec();
-
-            Assert.assertEquals(true, result);
-
-            new Within(duration("3 seconds")) {
-                @Override
-                protected void run() {
+        new ShardTestKit(getSystem()) {{
+            final ActorRef subject = getSystem().actorOf(newShardProps(), "testCreateTransactionOnChain");
 
-                    subject.tell(
-                        new UpdateSchemaContext(TestModel.createTestContext()),
-                        getRef());
+            waitUntilLeader(subject);
 
-                    subject.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.READ_ONLY.ordinal() , "foobar").toSerializable(),
-                        getRef());
+            subject.tell(new CreateTransaction("txn-1",
+                    TransactionProxy.TransactionType.READ_ONLY.ordinal(), "foobar").toSerializable(),
+                    getRef());
 
-                    final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
-                        // do not put code outside this method, will run afterwards
-                        @Override
-                        protected String match(Object in) {
-                            if (in instanceof CreateTransactionReply) {
-                                CreateTransactionReply reply =
-                                    (CreateTransactionReply) in;
-                                return reply.getTransactionActorPath()
-                                    .toString();
-                            } else {
-                                throw noMatch();
-                            }
-                        }
-                    }.get(); // this extracts the received message
+            CreateTransactionReply reply = expectMsgClass(duration("3 seconds"),
+                    CreateTransactionReply.class);
 
-                    assertTrue("Unexpected transaction path " + out,
-                        out.contains("akka://test/user/testCreateTransactionOnChain/shard-txn-1"));
-                    expectNoMsg();
-                }
-            };
+            String path = reply.getTransactionActorPath().toString();
+            assertTrue("Unexpected transaction path " + path,
+                    path.contains("akka://test/user/testCreateTransactionOnChain/shard-txn-1"));
+            expectNoMsg();
         }};
     }
 
     @Test
     public void testPeerAddressResolved(){
         new JavaTestKit(getSystem()) {{
-            Map<ShardIdentifier, String> peerAddresses = new HashMap<>();
-
             final ShardIdentifier identifier =
                 ShardIdentifier.builder().memberName("member-1")
                     .shardName("inventory").type("config").build();
 
-            peerAddresses.put(identifier, null);
-            final Props props = Shard.props(identifier, peerAddresses, DATA_STORE_CONTEXT, TestModel.createTestContext());
-            final ActorRef subject =
-                getSystem().actorOf(props, "testPeerAddressResolved");
+            Props props = Shard.props(identifier,
+                    Collections.<ShardIdentifier, String>singletonMap(identifier, null),
+                    DATA_STORE_CONTEXT, SCHEMA_CONTEXT);
+            final ActorRef subject = getSystem().actorOf(props, "testPeerAddressResolved");
 
             new Within(duration("3 seconds")) {
                 @Override
@@ -261,99 +197,205 @@ public class ShardTest extends AbstractActorTest {
 
     @Test
     public void testApplySnapshot() throws ExecutionException, InterruptedException {
-        Map<ShardIdentifier, String> peerAddresses = new HashMap<>();
+        TestActorRef<Shard> ref = TestActorRef.create(getSystem(), newShardProps());
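+        // Round trip: write a node, encode the root as a snapshot, apply it back and compare.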
 
-        final ShardIdentifier identifier =
-            ShardIdentifier.builder().memberName("member-1")
-                .shardName("inventory").type("config").build();
+        NormalizedNodeToNodeCodec codec =
+            new NormalizedNodeToNodeCodec(SCHEMA_CONTEXT);
 
-        peerAddresses.put(identifier, null);
-        final Props props = Shard.props(identifier, peerAddresses, DATA_STORE_CONTEXT, TestModel.createTestContext());
+        ref.underlyingActor().writeToStore(TestModel.TEST_PATH, ImmutableNodes.containerNode(
+                TestModel.TEST_QNAME));
 
-        TestActorRef<Shard> ref = TestActorRef.create(getSystem(), props);
+        YangInstanceIdentifier root = YangInstanceIdentifier.builder().build();
+        NormalizedNode<?,?> expected = ref.underlyingActor().readStore(root);
 
-        ref.underlyingActor().updateSchemaContext(TestModel.createTestContext());
+        NormalizedNodeMessages.Container encode = codec.encode(root, expected);
 
-        NormalizedNodeToNodeCodec codec =
-            new NormalizedNodeToNodeCodec(TestModel.createTestContext());
+        ApplySnapshot applySnapshot = new ApplySnapshot(Snapshot.create(
+                encode.getNormalizedNode().toByteString().toByteArray(),
+                Collections.<ReplicatedLogEntry>emptyList(), 1, 2, 3, 4));
 
-        ref.underlyingActor().writeToStore(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+        ref.underlyingActor().onReceiveCommand(applySnapshot);
 
-        NormalizedNode expected = ref.underlyingActor().readStore();
+        NormalizedNode<?,?> actual = ref.underlyingActor().readStore(root);
 
-        NormalizedNodeMessages.Container encode = codec
-            .encode(YangInstanceIdentifier.builder().build(), expected);
+        assertEquals(expected, actual);
+    }
 
+    @Test
+    public void testApplyState() throws Exception {
 
-        ref.underlyingActor().applySnapshot(encode.getNormalizedNode().toByteString());
+        TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps());
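+        // Drive a WriteModification through an ApplyState command and verify it reaches the store.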
 
-        NormalizedNode actual = ref.underlyingActor().readStore();
+        NormalizedNode<?, ?> node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
 
-        assertEquals(expected, actual);
-    }
+        MutableCompositeModification compMod = new MutableCompositeModification();
+        compMod.addModification(new WriteModification(TestModel.TEST_PATH, node, SCHEMA_CONTEXT));
+        Payload payload = new CompositeModificationPayload(compMod.toSerializable());
+        ApplyState applyState = new ApplyState(null, "test",
+                new ReplicatedLogImplEntry(1, 2, payload));
 
-    private static class ShardTestKit extends JavaTestKit {
+        shard.underlyingActor().onReceiveCommand(applyState);
 
-        private ShardTestKit(ActorSystem actorSystem) {
-            super(actorSystem);
+        NormalizedNode<?,?> actual = shard.underlyingActor().readStore(TestModel.TEST_PATH);
+        assertEquals("Applied state", node, actual);
+    }
+
+    @SuppressWarnings("serial")
+    @Test
+    public void testRecovery() throws Exception {
+
+        // Set up the InMemorySnapshotStore.
+
+        InMemoryDOMDataStore testStore = InMemoryDOMDataStoreFactory.create("Test", null, null);
+        testStore.onGlobalContextUpdated(SCHEMA_CONTEXT);
+
+        DOMStoreWriteTransaction writeTx = testStore.newWriteOnlyTransaction();
+        writeTx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
+        DOMStoreThreePhaseCommitCohort commitCohort = writeTx.ready();
+        commitCohort.preCommit().get();
+        commitCohort.commit().get();
+
+        DOMStoreReadTransaction readTx = testStore.newReadOnlyTransaction();
+        NormalizedNode<?, ?> root = readTx.read(YangInstanceIdentifier.builder().build()).get().get();
+
+        InMemorySnapshotStore.addSnapshot(IDENTIFIER.toString(), Snapshot.create(
+                new NormalizedNodeToNodeCodec(SCHEMA_CONTEXT).encode(
+                        YangInstanceIdentifier.builder().build(), root).
+                                getNormalizedNode().toByteString().toByteArray(),
+                                Collections.<ReplicatedLogEntry>emptyList(), 0, 1, -1, -1));
+
+        // Set up the InMemoryJournal.
+
+        InMemoryJournal.addEntry(IDENTIFIER.toString(), 0, new ReplicatedLogImplEntry(0, 1, newPayload(
+                  new WriteModification(TestModel.OUTER_LIST_PATH,
+                          ImmutableNodes.mapNodeBuilder(TestModel.OUTER_LIST_QNAME).build(),
+                          SCHEMA_CONTEXT))));
+
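+        // Follow the snapshot with a batch of list-entry merges plus an ApplyLogEntries marker.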
+        int nListEntries = 11;
+        Set<Integer> listEntryKeys = new HashSet<>();
+        for(int i = 1; i <= nListEntries; i++) {
+            listEntryKeys.add(Integer.valueOf(i));
+            YangInstanceIdentifier path = YangInstanceIdentifier.builder(TestModel.OUTER_LIST_PATH)
+                    .nodeWithKey(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i).build();
+            Modification mod = new MergeModification(path,
+                    ImmutableNodes.mapEntry(TestModel.OUTER_LIST_QNAME, TestModel.ID_QNAME, i),
+                    SCHEMA_CONTEXT);
+            InMemoryJournal.addEntry(IDENTIFIER.toString(), i, new ReplicatedLogImplEntry(i, 1,
+                    newPayload(mod)));
         }
 
-        protected void waitForLogMessage(final Class logLevel, ActorRef subject, String logMessage){
-            // Wait for a specific log message to show up
-            final boolean result =
-                new JavaTestKit.EventFilter<Boolean>(logLevel
-                ) {
+        InMemoryJournal.addEntry(IDENTIFIER.toString(), nListEntries + 1,
+                new ApplyLogEntries(nListEntries));
+
+        // Create the actor and wait for recovery complete.
+
+        final CountDownLatch recoveryComplete = new CountDownLatch(1);
+
+        Creator<Shard> creator = new Creator<Shard>() {
+            @Override
+            public Shard create() throws Exception {
+                return new Shard(IDENTIFIER, Collections.<ShardIdentifier,String>emptyMap(),
+                        DATA_STORE_CONTEXT, SCHEMA_CONTEXT) {
                     @Override
-                    protected Boolean run() {
-                        return true;
+                    protected void onRecoveryComplete() {
+                        try {
+                            super.onRecoveryComplete();
+                        } finally {
+                            recoveryComplete.countDown();
+                        }
                     }
-                }.from(subject.path().toString())
-                    .message(logMessage)
-                    .occurrences(1).exec();
+                };
+            }
+        };
 
-            Assert.assertEquals(true, result);
+        TestActorRef<Shard> shard = TestActorRef.create(getSystem(),
+                Props.create(new DelegatingShardCreator(creator)), "testRecovery");
+
+        assertEquals("Recovery complete", true, recoveryComplete.await(5, TimeUnit.SECONDS));
+
+        // Verify data in the data store.
+
+        NormalizedNode<?, ?> outerList = shard.underlyingActor().readStore(TestModel.OUTER_LIST_PATH);
+        assertNotNull(TestModel.OUTER_LIST_QNAME.getLocalName() + " not found", outerList);
+        assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " value is not Iterable",
+                outerList.getValue() instanceof Iterable);
+        for(Object entry: (Iterable<?>) outerList.getValue()) {
+            assertTrue(TestModel.OUTER_LIST_QNAME.getLocalName() + " entry is not MapEntryNode",
+                    entry instanceof MapEntryNode);
+            MapEntryNode mapEntry = (MapEntryNode)entry;
+            Optional<DataContainerChild<? extends PathArgument, ?>> idLeaf =
+                    mapEntry.getChild(new YangInstanceIdentifier.NodeIdentifier(TestModel.ID_QNAME));
+            assertTrue("Missing leaf " + TestModel.ID_QNAME.getLocalName(), idLeaf.isPresent());
+            Object value = idLeaf.get().getValue();
+            assertTrue("Unexpected value for leaf "+ TestModel.ID_QNAME.getLocalName() + ": " + value,
+                    listEntryKeys.remove(value));
+        }
 
+        if(!listEntryKeys.isEmpty()) {
+            fail("Missing " + TestModel.OUTER_LIST_QNAME.getLocalName() + " entries with keys: " +
+                    listEntryKeys);
         }
 
+        assertEquals("Last log index", nListEntries,
+                shard.underlyingActor().getShardMBean().getLastLogIndex());
+        assertEquals("Commit index", nListEntries,
+                shard.underlyingActor().getShardMBean().getCommitIndex());
+        assertEquals("Last applied", nListEntries,
+                shard.underlyingActor().getShardMBean().getLastApplied());
     }
 
+    private CompositeModificationPayload newPayload(Modification... mods) {
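+        // Bundle the modifications into a serializable composite payload for a replicated log entry.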
+        MutableCompositeModification compMod = new MutableCompositeModification();
+        for(Modification mod: mods) {
+            compMod.addModification(mod);
+        }
+
+        return new CompositeModificationPayload(compMod.toSerializable());
+    }
+
+    @SuppressWarnings("unchecked")
     @Test
-    public void testCreateSnapshot() throws IOException, InterruptedException {
+    public void testForwardedCommitTransactionWithPersistence() throws IOException {
+        System.setProperty("shard.persistent", "true");
+
         new ShardTestKit(getSystem()) {{
-            final ShardIdentifier identifier =
-                ShardIdentifier.builder().memberName("member-1")
-                    .shardName("inventory").type("config").build();
+            TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps());
 
-            final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
-            final ActorRef subject =
-                getSystem().actorOf(props, "testCreateSnapshot");
+            waitUntilLeader(shard);
 
-            // Wait for a specific log message to show up
-            this.waitForLogMessage(Logging.Info.class, subject, "Switching from state Candidate to Leader");
+            NormalizedNode<?, ?> node = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
 
+            DOMStoreThreePhaseCommitCohort cohort = mock(DOMStoreThreePhaseCommitCohort.class);
+            doReturn(Futures.immediateFuture(null)).when(cohort).commit();
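+            // The forwarded commit should invoke the stubbed cohort and persist the payload.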
 
-            new Within(duration("3 seconds")) {
-                @Override
-                protected void run() {
+            MutableCompositeModification modification = new MutableCompositeModification();
+            modification.addModification(new WriteModification(TestModel.TEST_PATH, node,
+                    SCHEMA_CONTEXT));
 
-                    subject.tell(
-                        new UpdateSchemaContext(TestModel.createTestContext()),
-                        getRef());
+            shard.tell(new ForwardedCommitTransaction(cohort, modification), getRef());
 
-                    subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
-                        getRef());
+            expectMsgClass(duration("5 seconds"), CommitTransactionReply.SERIALIZABLE_CLASS);
 
-                    waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+            verify(cohort).commit();
 
-                    subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
-                        getRef());
+            assertEquals("Last log index", 0, shard.underlyingActor().getShardMBean().getLastLogIndex());
+        }};
+    }
 
-                    waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+    @Test
+    public void testCreateSnapshot() throws IOException, InterruptedException {
+        new ShardTestKit(getSystem()) {{
+            final ActorRef subject = getSystem().actorOf(newShardProps(), "testCreateSnapshot");
 
-                }
-            };
+            waitUntilLeader(subject);
+
+            subject.tell(new CaptureSnapshot(-1,-1,-1,-1), getRef());
+
+            waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+
+            subject.tell(new CaptureSnapshot(-1,-1,-1,-1), getRef());
 
-            deletePersistenceFiles();
+            waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
         }};
     }
 
@@ -366,7 +408,7 @@ public class ShardTest extends AbstractActorTest {
         InMemoryDOMDataStore store = new InMemoryDOMDataStore("test", MoreExecutors.listeningDecorator(
             MoreExecutors.sameThreadExecutor()), MoreExecutors.sameThreadExecutor());
 
-        store.onGlobalContextUpdated(TestModel.createTestContext());
+        store.onGlobalContextUpdated(SCHEMA_CONTEXT);
 
         DOMStoreWriteTransaction putTransaction = store.newWriteOnlyTransaction();
         putTransaction.write(TestModel.TEST_PATH,
@@ -424,4 +466,46 @@ public class ShardTest extends AbstractActorTest {
             }
         };
     }
+
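+    // Wraps a test-supplied Creator so Props.create gets a named top-level class.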
+    private static final class DelegatingShardCreator implements Creator<Shard> {
+        private final Creator<Shard> delegate;
+
+        DelegatingShardCreator(Creator<Shard> delegate) {
+            this.delegate = delegate;
+        }
+
+        @Override
+        public Shard create() throws Exception {
+            return delegate.create();
+        }
+    }
+
+    private static class ShardTestKit extends JavaTestKit {
+
+        private ShardTestKit(ActorSystem actorSystem) {
+            super(actorSystem);
+        }
+
+        protected void waitForLogMessage(final Class<?> logLevel, ActorRef subject, String logMessage) {
+            // Wait for a specific log message to show up
+            final boolean result =
+                new JavaTestKit.EventFilter<Boolean>(logLevel
+                ) {
+                    @Override
+                    protected Boolean run() {
+                        return true;
+                    }
+                }.from(subject.path().toString())
+                    .message(logMessage)
+                    .occurrences(1).exec();
+
+            Assert.assertEquals(true, result);
+
+        }
+
+        protected void waitUntilLeader(ActorRef subject) {
+            waitForLogMessage(Logging.Info.class, subject,
+                    "Switching from state Candidate to Leader");
+        }
+    }
 }
index 0beb00b435ebe622d888dd58b40d938877c4d612..3f31591c79c4f7d3aa6dff45f518c2f8d38726e0 100644 (file)
@@ -503,7 +503,7 @@ public class ShardTransactionTest extends AbstractActorTest {
 
         datastoreContext = new DatastoreContext("Test",
                 InMemoryDOMDataStoreConfigProperties.getDefault(),
-                Duration.create(500, TimeUnit.MILLISECONDS), 5);
+                Duration.create(500, TimeUnit.MILLISECONDS), 5, 1000, 1000, 500);
 
         new JavaTestKit(getSystem()) {{
             final ActorRef shard = createShard();
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/InMemoryJournal.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/InMemoryJournal.java
new file mode 100644 (file)
index 0000000..c9a0eaf
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import com.google.common.collect.Maps;
+import scala.concurrent.Future;
+import akka.dispatch.Futures;
+import akka.japi.Procedure;
+import akka.persistence.PersistentConfirmation;
+import akka.persistence.PersistentId;
+import akka.persistence.PersistentImpl;
+import akka.persistence.PersistentRepr;
+import akka.persistence.journal.japi.AsyncWriteJournal;
+
+public class InMemoryJournal extends AsyncWriteJournal {
+
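+    // Journal entries keyed by persistence id; tests pre-populate them via addEntry().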
+    private static Map<String, Map<Long, Object>> journals = new ConcurrentHashMap<>();
+
+    public static void addEntry(String persistenceId, long sequenceNr, Object data) {
+        Map<Long, Object> journal = journals.get(persistenceId);
+        if(journal == null) {
+            journal = Maps.newLinkedHashMap();
+            journals.put(persistenceId, journal);
+        }
+
+        journal.put(sequenceNr, data);
+    }
+
+    public static void clear() {
+        journals.clear();
+    }
+
+    @Override
+    public Future<Void> doAsyncReplayMessages(final String persistenceId, long fromSequenceNr,
+            long toSequenceNr, long max, final Procedure<PersistentRepr> replayCallback) {
+        return Futures.future(new Callable<Void>() {
+            @Override
+            public Void call() throws Exception {
+                Map<Long, Object> journal = journals.get(persistenceId);
+                if(journal == null) {
+                    return null;
+                }
+
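+                // Replay everything that was added, ignoring the requested range; good enough for tests.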
+                for (Map.Entry<Long,Object> entry : journal.entrySet()) {
+                    PersistentRepr persistentMessage =
+                        new PersistentImpl(entry.getValue(), entry.getKey(), persistenceId, false, null, null);
+                    replayCallback.apply(persistentMessage);
+                }
+
+                return null;
+            }
+        }, context().dispatcher());
+    }
+
+    @Override
+    public Future<Long> doAsyncReadHighestSequenceNr(String persistenceId, long fromSequenceNr) {
+        return Futures.successful(Long.valueOf(0));
+    }
+
+    @Override
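+    // Writes, confirmations and deletes are no-ops: tests seed entries directly through addEntry().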
+    public Future<Void> doAsyncWriteMessages(Iterable<PersistentRepr> messages) {
+        return Futures.successful(null);
+    }
+
+    @Override
+    public Future<Void> doAsyncWriteConfirmations(Iterable<PersistentConfirmation> confirmations) {
+        return Futures.successful(null);
+    }
+
+    @Override
+    public Future<Void> doAsyncDeleteMessages(Iterable<PersistentId> messageIds, boolean permanent) {
+        return Futures.successful(null);
+    }
+
+    @Override
+    public Future<Void> doAsyncDeleteMessagesTo(String persistenceId, long toSequenceNr, boolean permanent) {
+        return Futures.successful(null);
+    }
+}
index 0e492f0fbbc57ada924540b7f2958ea9c81ae437..22e522b760792366997fe0e4576ab4d576fc390f 100644 (file)
@@ -16,46 +16,66 @@ import akka.persistence.SnapshotSelectionCriteria;
 import akka.persistence.snapshot.japi.SnapshotStore;
 import com.google.common.collect.Iterables;
 import scala.concurrent.Future;
-
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import org.opendaylight.controller.cluster.raft.Snapshot;
 
 public class InMemorySnapshotStore extends SnapshotStore {
 
-    Map<String, List<Snapshot>> snapshots = new HashMap<>();
+    private static Map<String, List<StoredSnapshot>> snapshots = new ConcurrentHashMap<>();
+
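+    // Pre-seeds a snapshot for a persistence id so recovery tests can exercise snapshot restore.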
+    public static void addSnapshot(String persistentId, Snapshot snapshot) {
+        List<StoredSnapshot> snapshotList = snapshots.get(persistentId);
+
+        if(snapshotList == null) {
+            snapshotList = new ArrayList<>();
+            snapshots.put(persistentId, snapshotList);
+        }
+
+        snapshotList.add(new StoredSnapshot(new SnapshotMetadata(persistentId, snapshotList.size(),
+                System.currentTimeMillis()), snapshot));
+    }
+
+    public static void clear() {
+        snapshots.clear();
+    }
 
-    @Override public Future<Option<SelectedSnapshot>> doLoadAsync(String s,
+    @Override
+    public Future<Option<SelectedSnapshot>> doLoadAsync(String s,
         SnapshotSelectionCriteria snapshotSelectionCriteria) {
-        List<Snapshot> snapshotList = snapshots.get(s);
+        List<StoredSnapshot> snapshotList = snapshots.get(s);
         if(snapshotList == null){
             return Futures.successful(Option.<SelectedSnapshot>none());
         }
 
-        Snapshot snapshot = Iterables.getLast(snapshotList);
+        StoredSnapshot snapshot = Iterables.getLast(snapshotList);
         SelectedSnapshot selectedSnapshot =
             new SelectedSnapshot(snapshot.getMetadata(), snapshot.getData());
         return Futures.successful(Option.some(selectedSnapshot));
     }
 
-    @Override public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
-        List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+    @Override
+    public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
+        List<StoredSnapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
 
         if(snapshotList == null){
             snapshotList = new ArrayList<>();
             snapshots.put(snapshotMetadata.persistenceId(), snapshotList);
         }
-        snapshotList.add(new Snapshot(snapshotMetadata, o));
+        snapshotList.add(new StoredSnapshot(snapshotMetadata, o));
 
         return Futures.successful(null);
     }
 
-    @Override public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
+    @Override
+    public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
     }
 
-    @Override public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
-        List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+    @Override
+    public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
+        List<StoredSnapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
 
         if(snapshotList == null){
             return;
@@ -64,7 +84,7 @@ public class InMemorySnapshotStore extends SnapshotStore {
         int deleteIndex = -1;
 
         for(int i=0;i<snapshotList.size(); i++){
-            Snapshot snapshot = snapshotList.get(i);
+            StoredSnapshot snapshot = snapshotList.get(i);
             if(snapshotMetadata.equals(snapshot.getMetadata())){
                 deleteIndex = i;
                 break;
@@ -77,9 +97,10 @@ public class InMemorySnapshotStore extends SnapshotStore {
 
     }
 
-    @Override public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria)
+    @Override
+    public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria)
         throws Exception {
-        List<Snapshot> snapshotList = snapshots.get(s);
+        List<StoredSnapshot> snapshotList = snapshots.get(s);
 
         if(snapshotList == null){
             return;
@@ -90,11 +111,11 @@ public class InMemorySnapshotStore extends SnapshotStore {
         snapshots.remove(s);
     }
 
-    private static class Snapshot {
+    private static class StoredSnapshot {
         private final SnapshotMetadata metadata;
         private final Object data;
 
-        private Snapshot(SnapshotMetadata metadata, Object data) {
+        private StoredSnapshot(SnapshotMetadata metadata, Object data) {
             this.metadata = metadata;
             this.data = data;
         }
index f0dadc618b2b4769b0240ff1c55567b28c924fb9..3a37dd937656d028bd20e5591975d509521d7a93 100644 (file)
@@ -1,5 +1,6 @@
 akka {
     persistence.snapshot-store.plugin = "in-memory-snapshot-store"
+    persistence.journal.plugin = "in-memory-journal"
 
     loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
 
@@ -17,6 +18,10 @@ akka {
     }
 }
 
+in-memory-journal {
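+    # Class name of the plugin.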
+    class = "org.opendaylight.controller.cluster.datastore.utils.InMemoryJournal"
+}
+
 in-memory-snapshot-store {
   # Class name of the plugin.
   class = "org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore"
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/HashMapDataStoreModule.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/HashMapDataStoreModule.java
deleted file mode 100644 (file)
index df1b5a3..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.md.sal.dom.impl;
-
-import org.opendaylight.controller.sal.dom.broker.impl.HashMapDataStore;
-
-/**
-*
-*/
-public final class HashMapDataStoreModule extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractHashMapDataStoreModule
-{
-
-    public HashMapDataStoreModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
-        super(identifier, dependencyResolver);
-    }
-
-    public HashMapDataStoreModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, HashMapDataStoreModule oldModule, java.lang.AutoCloseable oldInstance) {
-        super(identifier, dependencyResolver, oldModule, oldInstance);
-    }
-
-    @Override
-    public void validate(){
-        super.validate();
-        // Add custom validation for module attributes here.
-    }
-
-    @Override
-    public java.lang.AutoCloseable createInstance() {
-        HashMapDataStore store = new HashMapDataStore();
-        return store;
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/HashMapDataStoreModuleFactory.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/HashMapDataStoreModuleFactory.java
deleted file mode 100644 (file)
index 6b5503f..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.md.sal.dom.impl;
-
-/**
-*
-*/
-public class HashMapDataStoreModuleFactory extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractHashMapDataStoreModuleFactory
-{
-
-
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/HashMapDataStore.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/HashMapDataStore.java
deleted file mode 100644 (file)
index 1f82bd7..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.dom.broker.impl;
-
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
-import org.opendaylight.controller.md.sal.common.api.data.DataModification;
-import org.opendaylight.controller.sal.core.api.data.DataStore;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public final class HashMapDataStore implements DataStore, AutoCloseable {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(HashMapDataStore.class);
-
-    private final Map<YangInstanceIdentifier, CompositeNode> configuration = new ConcurrentHashMap<YangInstanceIdentifier, CompositeNode>();
-    private final Map<YangInstanceIdentifier, CompositeNode> operational = new ConcurrentHashMap<YangInstanceIdentifier, CompositeNode>();
-
-    @Override
-    public boolean containsConfigurationPath(final YangInstanceIdentifier path) {
-        return configuration.containsKey(path);
-    }
-
-    @Override
-    public boolean containsOperationalPath(final YangInstanceIdentifier path) {
-        return operational.containsKey(path);
-    }
-
-    @Override
-    public Iterable<YangInstanceIdentifier> getStoredConfigurationPaths() {
-        return configuration.keySet();
-    }
-
-    @Override
-    public Iterable<YangInstanceIdentifier> getStoredOperationalPaths() {
-        return operational.keySet();
-    }
-
-    @Override
-    public CompositeNode readConfigurationData(final YangInstanceIdentifier path) {
-        LOG.trace("Reading configuration path {}", path);
-        return configuration.get(path);
-    }
-
-    @Override
-    public CompositeNode readOperationalData(YangInstanceIdentifier path) {
-        LOG.trace("Reading operational path {}", path);
-        return operational.get(path);
-    }
-
-    @Override
-    public DataCommitHandler.DataCommitTransaction<YangInstanceIdentifier, CompositeNode> requestCommit(
-            final DataModification<YangInstanceIdentifier, CompositeNode> modification) {
-        return new HashMapDataStoreTransaction(modification, this);
-    }
-
-    public RpcResult<Void> rollback(HashMapDataStoreTransaction transaction) {
-        return RpcResultBuilder.<Void> success().build();
-    }
-
-    public RpcResult<Void> finish(HashMapDataStoreTransaction transaction) {
-        final DataModification<YangInstanceIdentifier, CompositeNode> modification = transaction
-                .getModification();
-        for (final YangInstanceIdentifier removal : modification
-                .getRemovedConfigurationData()) {
-            LOG.trace("Removing configuration path {}", removal);
-            remove(configuration, removal);
-        }
-        for (final YangInstanceIdentifier removal : modification
-                .getRemovedOperationalData()) {
-            LOG.trace("Removing operational path {}", removal);
-            remove(operational, removal);
-        }
-        if (LOG.isTraceEnabled()) {
-            for (final YangInstanceIdentifier a : modification
-                    .getUpdatedConfigurationData().keySet()) {
-                LOG.trace("Adding configuration path {}", a);
-            }
-            for (final YangInstanceIdentifier a : modification
-                    .getUpdatedOperationalData().keySet()) {
-                LOG.trace("Adding operational path {}", a);
-            }
-        }
-        configuration.putAll(modification.getUpdatedConfigurationData());
-        operational.putAll(modification.getUpdatedOperationalData());
-
-        return RpcResultBuilder.<Void> success().build();
-    }
-
-    public void remove(final Map<YangInstanceIdentifier, CompositeNode> map,
-            final YangInstanceIdentifier identifier) {
-        Set<YangInstanceIdentifier> affected = new HashSet<YangInstanceIdentifier>();
-        for (final YangInstanceIdentifier path : map.keySet()) {
-            if (identifier.contains(path)) {
-                affected.add(path);
-            }
-        }
-        for (final YangInstanceIdentifier pathToRemove : affected) {
-            LOG.trace("Removed path {}", pathToRemove);
-            map.remove(pathToRemove);
-        }
-    }
-
-    @Override
-    public void close() {
-        // NOOP
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/HashMapDataStoreTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/HashMapDataStoreTransaction.java
deleted file mode 100644 (file)
index ee026b6..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.dom.broker.impl;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataModification;
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-public class HashMapDataStoreTransaction implements
-        DataCommitTransaction<YangInstanceIdentifier, CompositeNode> {
-    private final DataModification<YangInstanceIdentifier, CompositeNode> modification;
-    private final HashMapDataStore datastore;
-
-    HashMapDataStoreTransaction(
-            final DataModification<YangInstanceIdentifier, CompositeNode> modify,
-            final HashMapDataStore store) {
-        modification = modify;
-        datastore = store;
-    }
-
-    @Override
-    public RpcResult<Void> finish() throws IllegalStateException {
-        return datastore.finish(this);
-    }
-
-    @Override
-    public DataModification<YangInstanceIdentifier, CompositeNode> getModification() {
-        return this.modification;
-    }
-
-    @Override
-    public RpcResult<Void> rollback() throws IllegalStateException {
-        return datastore.rollback(this);
-    }
-}
\ No newline at end of file
index b1df7efcdbb6e4efb0ad6fdd2ecbf85aed2b98d1..e81f71a7d2069014b511b1162168131633d62c1a 100644 (file)
@@ -32,12 +32,6 @@ module opendaylight-sal-dom-broker-impl {
         config:provided-service sal:dom-async-data-broker;
     }
     
-    identity hash-map-data-store {
-        base config:module-type;
-        config:provided-service sal:dom-data-store;
-        config:java-name-prefix HashMapDataStore;
-    }
-    
     identity schema-service-singleton {
         base config:module-type;
         config:provided-service sal:schema-service;
@@ -129,12 +123,6 @@ module opendaylight-sal-dom-broker-impl {
         }
     }
     
-    augment "/config:modules/config:module/config:state" {
-        case hash-map-data-store {
-            when "/config:modules/config:module/config:type = 'hash-map-data-store'";
-        }
-    }
-    
     augment "/config:modules/config:module/config:state" {
         case schema-service-singleton {
             when "/config:modules/config:module/config:type = 'schema-service-singleton'";
@@ -149,4 +137,4 @@ module opendaylight-sal-dom-broker-impl {
             } 
         }
     }
-}
\ No newline at end of file
+}
index b421e56cf4eb82b7fccc3b7b20b17a2aaa263a4a..cde01573f2c68d64649d008bb070faad556a5b45 100644 (file)
@@ -1,7 +1,5 @@
 package org.opendaylight.xsql;
 
-import java.util.concurrent.ExecutionException;
-
 import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
 import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626.XSQL;
@@ -25,16 +23,15 @@ public class XSQLProvider implements AutoCloseable {
             XSQLBuilder builder = new XSQLBuilder();
             builder.setPort("34343");
             XSQL xsql = builder.build();
-            if (dps != null) {
-                final DataModificationTransaction t = dps.beginTransaction();
-                t.removeOperationalData(ID);
-                t.putOperationalData(ID,xsql);
-
-                try {
+            try {
+                if (dps != null) {
+                    final DataModificationTransaction t = dps.beginTransaction();
+                    t.removeOperationalData(ID);
+                    t.putOperationalData(ID,xsql);
                     t.commit().get();
-                } catch (InterruptedException | ExecutionException e) {
-                   LOG.warn("Failed to update toaster status, operational otherwise", e);
                 }
+            } catch (Exception e) {
+                LOG.warn("Failed to update XSQL port status, ", e);
             }
         return xsql;
     }
index 5fe9866b12ed0ec6e2a669de4cc7bd3b9957b641..25ddbf5df25dc065ba11bcc64cdf059cfbacb14c 100644 (file)
@@ -11,10 +11,8 @@ import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
-
 import java.util.Collection;
 import java.util.Map.Entry;
-
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
 import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.Builder;
 import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.SimpleEventFactory;
@@ -120,7 +118,9 @@ final class ResolveDataChangeEventsTask {
             Preconditions.checkArgument(node.getDataAfter().isPresent(),
                     "Modification at {} has type {} but no after-data", state.getPath(), node.getModificationType());
             if (!node.getDataBefore().isPresent()) {
-                resolveCreateEvent(state, node.getDataAfter().get());
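+                // No before-data: the whole subtree is new, so emit create events for it recursively.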
+                @SuppressWarnings({ "unchecked", "rawtypes" })
+                final NormalizedNode<PathArgument, ?> afterNode = (NormalizedNode)node.getDataAfter().get();
+                resolveSameEventRecursivelly(state, afterNode, DOMImmutableDataChangeEvent.getCreateEventFactory());
                 return true;
             }
 
@@ -128,7 +128,10 @@ final class ResolveDataChangeEventsTask {
         case DELETE:
             Preconditions.checkArgument(node.getDataBefore().isPresent(),
                     "Modification at {} has type {} but no before-data", state.getPath(), node.getModificationType());
-            resolveDeleteEvent(state, node.getDataBefore().get());
+
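+            // Before-data only: the subtree was removed, so emit remove events for it recursively.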
+            @SuppressWarnings({ "unchecked", "rawtypes" })
+            final NormalizedNode<PathArgument, ?> beforeNode = (NormalizedNode)node.getDataBefore().get();
+            resolveSameEventRecursivelly(state, beforeNode, DOMImmutableDataChangeEvent.getRemoveEventFactory());
             return true;
         case UNMODIFIED:
             return false;
@@ -223,26 +226,6 @@ final class ResolveDataChangeEventsTask {
         return true;
     }
 
-    /**
-     * Resolves create events deep down the interest listener tree.
-     *
-     * @param path
-     * @param listeners
-     * @param afterState
-     * @return
-     */
-    private void resolveCreateEvent(final ResolveDataChangeState state, final NormalizedNode<?, ?> afterState) {
-        @SuppressWarnings({ "unchecked", "rawtypes" })
-        final NormalizedNode<PathArgument, ?> node = (NormalizedNode) afterState;
-        resolveSameEventRecursivelly(state, node, DOMImmutableDataChangeEvent.getCreateEventFactory());
-    }
-
-    private void resolveDeleteEvent(final ResolveDataChangeState state, final NormalizedNode<?, ?> beforeState) {
-        @SuppressWarnings({ "unchecked", "rawtypes" })
-        final NormalizedNode<PathArgument, ?> node = (NormalizedNode) beforeState;
-        resolveSameEventRecursivelly(state, node, DOMImmutableDataChangeEvent.getRemoveEventFactory());
-    }
-
     private void resolveSameEventRecursivelly(final ResolveDataChangeState state,
             final NormalizedNode<PathArgument, ?> node, final SimpleEventFactory eventFactory) {
         if (!state.needsProcessing()) {
@@ -277,6 +260,11 @@ final class ResolveDataChangeEventsTask {
         Preconditions.checkArgument(modification.getDataBefore().isPresent(), "Subtree change with before-data not present at path %s", state.getPath());
         Preconditions.checkArgument(modification.getDataAfter().isPresent(), "Subtree change with after-data not present at path %s", state.getPath());
 
+        if (!state.needsProcessing()) {
+            LOG.trace("Not processing modified subtree {}", state.getPath());
+            return true;
+        }
+
         DataChangeScope scope = null;
         for (DataTreeCandidateNode childMod : modification.getChildNodes()) {
             final ResolveDataChangeState childState = state.child(childMod.getIdentifier());
index c32d96084144e97c2bccd06bd7e6b30893a0bf29..a1bf2c806e7ad587cf63bc732d1ece972a05229d 100644 (file)
@@ -1,21 +1,21 @@
 <?xml version="1.0" encoding="UTF-8"?>\r
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">\r
 \r
-    <!--
-        Licensed to the Apache Software Foundation (ASF) under one or more
-        contributor license agreements.  See the NOTICE file distributed with
-        this work for additional information regarding copyright ownership.
-        The ASF licenses this file to You under the Apache License, Version 2.0
-        (the "License"); you may not use this file except in compliance with
-        the License.  You may obtain a copy of the License at
-
-            http://www.apache.org/licenses/LICENSE-2.0
-
-        Unless required by applicable law or agreed to in writing, software
-        distributed under the License is distributed on an "AS IS" BASIS,
-        WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-        See the License for the specific language governing permissions and
-        limitations under the License.
+    <!--\r
+        Licensed to the Apache Software Foundation (ASF) under one or more\r
+        contributor license agreements.  See the NOTICE file distributed with\r
+        this work for additional information regarding copyright ownership.\r
+        The ASF licenses this file to You under the Apache License, Version 2.0\r
+        (the "License"); you may not use this file except in compliance with\r
+        the License.  You may obtain a copy of the License at\r
+\r
+            http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+        Unless required by applicable law or agreed to in writing, software\r
+        distributed under the License is distributed on an "AS IS" BASIS,\r
+        WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+        See the License for the specific language governing permissions and\r
+        limitations under the License.\r
     -->\r
 \r
     <modelVersion>4.0.0</modelVersion>\r
     <version>1.1-SNAPSHOT</version>\r
   </parent>\r
 \r
-    <groupId>xsqlcommand</groupId>\r
-    <artifactId>xsqlcommand</artifactId>\r
+    <groupId>org.opendaylight.controller</groupId>\r
+    <artifactId>sal-karaf-xsql</artifactId>\r
     <packaging>bundle</packaging>\r
-    <version>1.0.0-SNAPSHOT</version>\r
     <name>Apache Karaf :: Shell odl/xsql Commands</name>\r
 \r
     <description>Provides the OSGi odl commands</description>\r
@@ -64,7 +63,6 @@
         <dependency>\r
             <groupId>org.opendaylight.controller</groupId>\r
             <artifactId>sal-dom-xsql</artifactId>\r
-            <type>bundle</type>\r
             <version>1.1-SNAPSHOT</version>\r
         </dependency>\r
     </dependencies>\r
diff --git a/opendaylight/md-sal/sal-karaf-xsql/src/main/resources/OSGI-INF/blueprint/shell-log.xml b/opendaylight/md-sal/sal-karaf-xsql/src/main/resources/OSGI-INF/blueprint/shell-log.xml
new file mode 100644 (file)
index 0000000..e9a4a23
--- /dev/null
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
+           xmlns:cm="http://aries.apache.org/blueprint/xmlns/blueprint-cm/v1.0.0">
+
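+    <!-- Register the xsql action class as a Karaf console command. -->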
+    <command-bundle xmlns="http://karaf.apache.org/xmlns/shell/v1.1.0">
+        <command>
+            <action class="org.opendaylight.controller.xsql.xsql">
+            </action>
+        </command>
+    </command-bundle>
+</blueprint>
index 2642116927cde25de425d8de95884cb5d0ae3b57..09e178f5ceaa4e4709efa51dc9267a41f65b2d3f 100644 (file)
@@ -8,11 +8,10 @@ import com.google.common.base.Splitter;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
-
+import java.net.URI;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
-
 import org.opendaylight.controller.netconf.client.NetconfClientSession;
 import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
 import org.opendaylight.yangtools.yang.common.QName;
@@ -119,10 +118,14 @@ public final class NetconfSessionCapabilities {
         return fromStrings(session.getServerCapabilities());
     }
 
-    private static final QName cachedQName(String namespace, String revision, String moduleName) {
+    private static QName cachedQName(final String namespace, final String revision, final String moduleName) {
         return QName.cachedReference(QName.create(namespace, revision, moduleName));
     }
 
+    private static QName cachedQName(final String namespace, final String moduleName) {
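+        // Revision-less QName for capabilities that do not advertise a usable revision.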
+        return QName.cachedReference(QName.create(URI.create(namespace), null, moduleName).withoutRevision());
+    }
+
     public static NetconfSessionCapabilities fromStrings(final Collection<String> capabilities) {
         final Set<QName> moduleBasedCaps = new HashSet<>();
         final Set<String> nonModuleCaps = Sets.newHashSet(capabilities);
@@ -142,8 +145,7 @@ public final class NetconfSessionCapabilities {
 
             String revision = REVISION_PARAM.from(queryParams);
             if (revision != null) {
-                moduleBasedCaps.add(cachedQName(namespace, revision, moduleName));
-                nonModuleCaps.remove(capability);
+                addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, revision, moduleName));
                 continue;
             }
 
@@ -151,21 +153,29 @@ public final class NetconfSessionCapabilities {
              * We have seen devices which mis-escape revision, but the revision may not
              * even be there. First check if there is a substring that matches revision.
              */
-            if (!Iterables.any(queryParams, CONTAINS_REVISION)) {
+            if (Iterables.any(queryParams, CONTAINS_REVISION)) {
+
+                LOG.debug("Netconf device was not reporting revision correctly, trying to get amp;revision=");
+                revision = BROKEN_REVISON_PARAM.from(queryParams);
+                if (revision == null) {
+                    LOG.warn("Netconf device returned revision incorrectly escaped for {}, ignoring it", capability);
+                    addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, moduleName));
+                } else {
+                    addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, revision, moduleName));
+                }
                 continue;
             }
 
-            LOG.debug("Netconf device was not reporting revision correctly, trying to get amp;revision=");
-            revision = BROKEN_REVISON_PARAM.from(queryParams);
-            if (revision == null) {
-                LOG.warn("Netconf device returned revision incorrectly escaped for {}, ignoring it", capability);
-            }
-
-            // FIXME: do we really want to continue here?
-            moduleBasedCaps.add(cachedQName(namespace, revision, moduleName));
-            nonModuleCaps.remove(capability);
+            // Fallback, no revision provided for module
+            addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, moduleName));
         }
 
         return new NetconfSessionCapabilities(ImmutableSet.copyOf(nonModuleCaps), ImmutableSet.copyOf(moduleBasedCaps));
     }
+
+
+    private static void addModuleQName(final Set<QName> moduleBasedCaps, final Set<String> nonModuleCaps, final String capability, final QName qName) {
+        moduleBasedCaps.add(qName);
+        nonModuleCaps.remove(capability);
+    }
 }
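
Taken together, the parsing rewrite above reduces each capability string to one of three outcomes, and in every outcome addModuleQName now moves the capability from the non-module set into the module set. A condensed sketch of that decision chain (the method name toModuleQName is invented for illustration; the calls mirror the patch):

    // Sketch only: collapses the loop above into one decision chain.
    static QName toModuleQName(final String namespace, final String revision,
            final String brokenRevision, final String moduleName) {
        if (revision != null) {
            // well-formed "...&revision=YYYY-MM-DD"
            return QName.cachedReference(QName.create(namespace, revision, moduleName));
        }
        if (brokenRevision != null) {
            // device escaped "&" as "&amp;", but the revision is recoverable
            return QName.cachedReference(QName.create(namespace, brokenRevision, moduleName));
        }
        // no usable revision at all: fall back to a revision-less QName
        return QName.cachedReference(
                QName.create(URI.create(namespace), null, moduleName).withoutRevision());
    }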
index 87947b57faf5b28bf840411503caf6b6353133ca..80bb08f5af399fee497465e8ad1e345726784b24 100644 (file)
@@ -43,6 +43,19 @@ public class NetconfSessionCapabilitiesTest {
         assertThat(merged.getNonModuleCaps(), JUnitMatchers.hasItem("urn:ietf:params:netconf:capability:rollback-on-error:1.0"));
     }
 
+    @Test
+    public void testCapabilityNoRevision() throws Exception {
+        final List<String> caps1 = Lists.newArrayList(
+                "namespace:2?module=module2",
+                "namespace:2?module=module2&amp;revision=2012-12-12",
+                "namespace:2?module=module1&amp;RANDOMSTRING;revision=2013-12-12",
+                "namespace:2?module=module2&amp;RANDOMSTRING;revision=2013-12-12" // This one should be ignored(same as first), since revision is in wrong format
+        );
+
+        final NetconfSessionCapabilities sessionCaps1 = NetconfSessionCapabilities.fromStrings(caps1);
+        assertCaps(sessionCaps1, 0, 3);
+    }
+
     private void assertCaps(final NetconfSessionCapabilities sessionCaps1, final int nonModuleCaps, final int moduleCaps) {
         assertEquals(nonModuleCaps, sessionCaps1.getNonModuleCaps().size());
         assertEquals(moduleCaps, sessionCaps1.getModuleBasedCaps().size());
index 2e355d4f5146b13c31ee9f4a66a1bbed757d9354..c82a72eaa56c82e40388b8fb612eed7b198dbae8 100644 (file)
@@ -38,8 +38,9 @@ public class RemoteRpcProviderFactory {
                         Thread.currentThread().getContextClassLoader());
 
         Config actorSystemConfig = config.get();
-        LOG.debug("Actor system configuration\n{}", actorSystemConfig.root().render());
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Actor system configuration\n{}", actorSystemConfig.root().render());
+        }
         if (config.isMetricCaptureEnabled()) {
             LOG.info("Instrumentation is enabled in actor system {}. Metrics can be viewed in JMX console.",
                     config.getActorSystemName());
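
This file and the several that follow apply one pattern: LOG.debug calls are wrapped in an isDebugEnabled() guard. The guard matters most when an argument is expensive to build, as a brief sketch shows:

    // Worth guarding: render() serializes the whole config tree, and
    // method arguments are evaluated eagerly even when DEBUG is off.
    if (LOG.isDebugEnabled()) {
        LOG.debug("Actor system configuration\n{}", actorSystemConfig.root().render());
    }

    // Cheaper case: the {} placeholder already defers toString(), so for a
    // plain object argument the guard mainly saves a varargs allocation.
    if (LOG.isDebugEnabled()) {
        LOG.debug("Announcing [{}]", announcements);
    }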
index 98cf6a329f655f3caeebb39b989a1b761ca9a5cd..2aaac5a78ed531fc830bfca7540d2603a2e0f41b 100644 (file)
@@ -53,7 +53,9 @@ public class RoutedRpcListener implements RouteChangeListener<RpcRoutingContext,
    * @param announcements
    */
   private void announce(Set<RpcRouter.RouteIdentifier<?, ?, ?>> announcements) {
-    LOG.debug("Announcing [{}]", announcements);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Announcing [{}]", announcements);
+    }
     RpcRegistry.Messages.AddOrUpdateRoutes addRpcMsg = new RpcRegistry.Messages.AddOrUpdateRoutes(new ArrayList<>(announcements));
     rpcRegistry.tell(addRpcMsg, ActorRef.noSender());
   }
@@ -63,7 +65,9 @@ public class RoutedRpcListener implements RouteChangeListener<RpcRoutingContext,
    * @param removals
    */
   private void remove(Set<RpcRouter.RouteIdentifier<?, ?, ?>> removals){
-    LOG.debug("Removing [{}]", removals);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Removing [{}]", removals);
+    }
     RpcRegistry.Messages.RemoveRoutes removeRpcMsg = new RpcRegistry.Messages.RemoveRoutes(new ArrayList<>(removals));
     rpcRegistry.tell(removeRpcMsg, ActorRef.noSender());
   }
index 6b02235dc7d218c967fbd1e7d1d1a087d3304a30..2046e419d9f2602b444becf6986fe2c73bb9756e 100644 (file)
@@ -79,8 +79,9 @@ public class RpcBroker extends AbstractUntypedActor {
     }
 
     private void invokeRemoteRpc(final InvokeRpc msg) {
-        LOG.debug("Looking up the remote actor for rpc {}", msg.getRpc());
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Looking up the remote actor for rpc {}", msg.getRpc());
+        }
         RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(
                 null, msg.getRpc(), msg.getIdentifier());
         RpcRegistry.Messages.FindRouters findMsg = new RpcRegistry.Messages.FindRouters(routeId);
@@ -147,8 +148,9 @@ public class RpcBroker extends AbstractUntypedActor {
     }
 
     private void executeRpc(final ExecuteRpc msg) {
-        LOG.debug("Executing rpc {}", msg.getRpc());
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Executing rpc {}", msg.getRpc());
+        }
         Future<RpcResult<CompositeNode>> future = brokerSession.rpc(msg.getRpc(),
                 XmlUtils.inputXmlToCompositeNode(msg.getRpc(), msg.getInputCompositeNode(),
                         schemaContext));
index dee98521ae9f2d56893d591b30bc30a8489d89ef..22879dda2f903f6008c5a7c21a2a6447acca12bf 100644 (file)
@@ -31,7 +31,9 @@ public class RpcListener implements RpcRegistrationListener{
 
   @Override
   public void onRpcImplementationAdded(QName rpc) {
-    LOG.debug("Adding registration for [{}]", rpc);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Adding registration for [{}]", rpc);
+    }
     RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(null, rpc, null);
     List<RpcRouter.RouteIdentifier<?,?,?>> routeIds = new ArrayList<>();
     routeIds.add(routeId);
@@ -41,7 +43,9 @@ public class RpcListener implements RpcRegistrationListener{
 
   @Override
   public void onRpcImplementationRemoved(QName rpc) {
-    LOG.debug("Removing registration for [{}]", rpc);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Removing registration for [{}]", rpc);
+    }
     RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(null, rpc, null);
     List<RpcRouter.RouteIdentifier<?,?,?>> routeIds = new ArrayList<>();
     routeIds.add(routeId);
index abe2008c2936cb1ed12b8624f072b65c33d8c518..48ccd824d41cf6b1456007012d6801d028443fce 100644 (file)
@@ -25,7 +25,9 @@ public class TerminationMonitor extends UntypedActor{
     @Override public void onReceive(Object message) throws Exception {
         if(message instanceof Terminated){
             Terminated terminated = (Terminated) message;
-            LOG.debug("Actor terminated : {}", terminated.actor());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Actor terminated : {}", terminated.actor());
+            }
         }else if(message instanceof Monitor){
           Monitor monitor = (Monitor) message;
           getContext().watch(monitor.getActorRef());
index 3de3fc00d0328215ef8f127e8272458f14326708..b50dfb1ba3e196f66e33d74438d43a2aca2d81ee 100644 (file)
@@ -111,7 +111,9 @@ public class BucketStore extends AbstractUntypedActorWithMetering {
             receiveUpdateRemoteBuckets(
                     ((UpdateRemoteBuckets) message).getBuckets());
         } else {
-            log.debug("Unhandled message [{}]", message);
+            if(log.isDebugEnabled()) {
+                log.debug("Unhandled message [{}]", message);
+            }
             unhandled(message);
         }
     }
@@ -236,8 +238,9 @@ public class BucketStore extends AbstractUntypedActorWithMetering {
                 versions.put(entry.getKey(), remoteVersion);
             }
         }
-
-        log.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets);
+        if(log.isDebugEnabled()) {
+            log.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets);
+        }
     }
 
     ///
index 85c6ebe26f859e0751122d4ab60065a0f1b48aba..1bbcc69f5ed4d5fa6d7d8ea773823c97c9bb6e05 100644 (file)
@@ -170,7 +170,9 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
         }
 
         clusterMembers.remove(member.address());
-        log.debug("Removed member [{}], Active member list [{}]", member.address(), clusterMembers);
+        if(log.isDebugEnabled()) {
+            log.debug("Removed member [{}], Active member list [{}]", member.address(), clusterMembers);
+        }
     }
 
     /**
@@ -184,8 +186,9 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
 
         if (!clusterMembers.contains(member.address()))
             clusterMembers.add(member.address());
-
-        log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers);
+        if(log.isDebugEnabled()) {
+            log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers);
+        }
     }
 
     /**
@@ -205,8 +208,9 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
             Integer randomIndex = ThreadLocalRandom.current().nextInt(0, clusterMembers.size());
             remoteMemberToGossipTo = clusterMembers.get(randomIndex);
         }
-
-        log.debug("Gossiping to [{}]", remoteMemberToGossipTo);
+        if(log.isDebugEnabled()) {
+            log.debug("Gossiping to [{}]", remoteMemberToGossipTo);
+        }
         getLocalStatusAndSendTo(remoteMemberToGossipTo);
     }
 
@@ -244,7 +248,9 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
     void receiveGossip(GossipEnvelope envelope){
         //TODO: Add more validations
         if (!selfAddress.equals(envelope.to())) {
-            log.debug("Ignoring message intended for someone else. From [{}] to [{}]", envelope.from(), envelope.to());
+            if(log.isDebugEnabled()) {
+                log.debug("Ignoring message intended for someone else. From [{}] to [{}]", envelope.from(), envelope.to());
+            }
             return;
         }
 
@@ -291,7 +297,9 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
         ActorSelection remoteRef = getContext().system().actorSelection(
                 remoteActorSystemAddress.toString() + getSelf().path().toStringWithoutAddress());
 
-        log.debug("Sending bucket versions to [{}]", remoteRef);
+        if(log.isDebugEnabled()) {
+            log.debug("Sending bucket versions to [{}]", remoteRef);
+        }
 
         futureReply.map(getMapperToSendLocalStatus(remoteRef), getContext().dispatcher());
 
@@ -416,7 +424,9 @@ public class Gossiper extends AbstractUntypedActorWithMetering {
             public Void apply(Object msg) {
                 if (msg instanceof GetBucketsByMembersReply) {
                     Map<Address, Bucket> buckets = ((GetBucketsByMembersReply) msg).getBuckets();
-                    log.debug("Buckets to send from {}: {}", selfAddress, buckets);
+                    if(log.isDebugEnabled()) {
+                        log.debug("Buckets to send from {}: {}", selfAddress, buckets);
+                    }
                     GossipEnvelope envelope = new GossipEnvelope(selfAddress, sender.path().address(), buckets);
                     sender.tell(envelope, getSelf());
                 }
index 4a46a3c26712c8e54da6650bd4e68b2de9ad91ca..7f8f0a1d0e32478d4b977cefab1398a252c52c2a 100644 (file)
@@ -60,31 +60,31 @@ public interface RestconfService {
 
     @GET
     @Path("/modules")
-    @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+    @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
             MediaType.APPLICATION_XML, MediaType.TEXT_XML })
     public StructuredData getModules(@Context UriInfo uriInfo);
 
     @GET
     @Path("/modules/{identifier:.+}")
-    @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+    @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
             MediaType.APPLICATION_XML, MediaType.TEXT_XML })
     public StructuredData getModules(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
 
     @GET
     @Path("/modules/module/{identifier:.+}")
-    @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+    @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
             MediaType.APPLICATION_XML, MediaType.TEXT_XML })
     public StructuredData getModule(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
 
     @GET
     @Path("/operations")
-    @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+    @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
             MediaType.APPLICATION_XML, MediaType.TEXT_XML })
     public StructuredData getOperations(@Context UriInfo uriInfo);
 
     @GET
     @Path("/operations/{identifier:.+}")
-    @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+    @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
             MediaType.APPLICATION_XML, MediaType.TEXT_XML })
     public StructuredData getOperations(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
 
@@ -149,7 +149,7 @@ public interface RestconfService {
 
     @GET
     @Path("/streams")
-    @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+    @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
             MediaType.APPLICATION_XML, MediaType.TEXT_XML })
     public StructuredData getAvailableStreams(@Context UriInfo uriInfo);
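
Each reordering above puts the JSON variants ahead of XML in @Produces. When a client's Accept header rates several producible types equally (for example "Accept: */*", or no Accept header at all), JAX-RS implementations commonly break the tie by declaration order, so this change effectively makes JSON the default representation. A minimal illustration with a hypothetical resource:

    import javax.ws.rs.GET;
    import javax.ws.rs.Path;
    import javax.ws.rs.Produces;
    import javax.ws.rs.core.MediaType;

    // Hypothetical resource, not from this patch: on a wildcard Accept,
    // the first declared media type typically wins the tie.
    public interface ExampleResource {
        @GET
        @Path("/example")
        @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
        String getExample(); // served as JSON for "Accept: */*"
    }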
 
index 63a5b1b54055a81cd3f8f3f736365c4e99e1d8b8..10201ab6f5148c78480b9cceea34b77766f0027e 100644 (file)
@@ -78,16 +78,19 @@ public class RestconfDocumentedExceptionMapper implements ExceptionMapper<Restco
 
         LOG.debug("In toResponse: {}", exception.getMessage());
 
-        // Default to the content type if there's no Accept header
 
-        MediaType mediaType = headers.getMediaType();
 
         List<MediaType> accepts = headers.getAcceptableMediaTypes();
+        accepts.remove(MediaType.WILDCARD_TYPE);
 
         LOG.debug("Accept headers: {}", accepts);
 
+        final MediaType mediaType;
         if (accepts != null && accepts.size() > 0) {
             mediaType = accepts.get(0); // just pick the first one
+        } else {
+            // Default to the content type if there's no Accept header
+            mediaType = MediaType.APPLICATION_JSON_TYPE;
         }
 
         LOG.debug("Using MediaType: {}", mediaType);
index 2e2100b2874abfd1f05134e38b223e0f401ebc80..e0a51a8e3c0a7d46cfbda3447d42aa0cba64bc6d 100644 (file)
@@ -4,6 +4,11 @@
          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <modelVersion>4.0.0</modelVersion>
 
+  <parent>
+     <groupId>org.opendaylight.controller.samples</groupId>
+     <artifactId>sal-samples</artifactId>
+     <version>1.1-SNAPSHOT</version>
+  </parent>
 
   <artifactId>l2switch.aggregator</artifactId>
   <groupId>org.opendaylight.controller.samples.l2switch</groupId>
index fe1813a19943dfe7f3446b55f9c49b1986df6729..57313d2948960b038791d75a5fdb9287cce41530 100644 (file)
       <artifactId>org.osgi.core</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>
index c1996f4691632637abc9fc7dffacce0bcb12f2ad..361373d78da93f114f51c887ab95c78bc6ab3265 100644 (file)
@@ -15,9 +15,9 @@ import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMap
 import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNode;
 import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNodeId;
 
+import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
-
 import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
@@ -50,17 +50,19 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 
 class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, OpendaylightInventoryListener {
 
-    private final Logger LOG = LoggerFactory.getLogger(FlowCapableTopologyExporter.class);
+    private static final Logger LOG = LoggerFactory.getLogger(FlowCapableTopologyExporter.class);
     private final InstanceIdentifier<Topology> topology;
     private final OperationProcessor processor;
 
-    FlowCapableTopologyExporter(final OperationProcessor processor, final InstanceIdentifier<Topology> topology) {
+    FlowCapableTopologyExporter(final OperationProcessor processor,
+            final InstanceIdentifier<Topology> topology) {
         this.processor = Preconditions.checkNotNull(processor);
         this.topology = Preconditions.checkNotNull(topology);
     }
@@ -73,15 +75,14 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
 
         processor.enqueueOperation(new TopologyOperation() {
             @Override
-            public void applyOperation(final ReadWriteTransaction transaction) {
-                removeAffectedLinks(nodeId);
+            public void applyOperation(ReadWriteTransaction transaction) {
+                removeAffectedLinks(nodeId, transaction);
+                transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance);
             }
-        });
 
-        processor.enqueueOperation(new TopologyOperation() {
             @Override
-            public void applyOperation(ReadWriteTransaction transaction) {
-                transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance);
+            public String toString() {
+                return "onNodeRemoved";
             }
         });
     }
@@ -97,6 +98,11 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
                     final InstanceIdentifier<Node> path = getNodePath(toTopologyNodeId(notification.getId()));
                     transaction.merge(LogicalDatastoreType.OPERATIONAL, path, node, true);
                 }
+
+                @Override
+                public String toString() {
+                    return "onNodeUpdated";
+                }
             });
         }
     }
@@ -104,28 +110,30 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
     @Override
     public void onNodeConnectorRemoved(final NodeConnectorRemoved notification) {
 
-        final InstanceIdentifier<TerminationPoint> tpInstance = toTerminationPointIdentifier(notification
-                .getNodeConnectorRef());
+        final InstanceIdentifier<TerminationPoint> tpInstance = toTerminationPointIdentifier(
+                notification.getNodeConnectorRef());
 
-        processor.enqueueOperation(new TopologyOperation() {
-            @Override
-            public void applyOperation(final ReadWriteTransaction transaction) {
-                final TpId tpId = toTerminationPointId(getNodeConnectorKey(notification.getNodeConnectorRef()).getId());
-                removeAffectedLinks(tpId);
-            }
-        });
+        final TpId tpId = toTerminationPointId(getNodeConnectorKey(
+                notification.getNodeConnectorRef()).getId());
 
         processor.enqueueOperation(new TopologyOperation() {
             @Override
             public void applyOperation(ReadWriteTransaction transaction) {
+                removeAffectedLinks(tpId, transaction);
                 transaction.delete(LogicalDatastoreType.OPERATIONAL, tpInstance);
             }
+
+            @Override
+            public String toString() {
+                return "onNodeConnectorRemoved";
+            }
         });
     }
 
     @Override
     public void onNodeConnectorUpdated(final NodeConnectorUpdated notification) {
-        final FlowCapableNodeConnectorUpdated fcncu = notification.getAugmentation(FlowCapableNodeConnectorUpdated.class);
+        final FlowCapableNodeConnectorUpdated fcncu = notification.getAugmentation(
+                FlowCapableNodeConnectorUpdated.class);
         if (fcncu != null) {
             processor.enqueueOperation(new TopologyOperation() {
                 @Override
@@ -137,9 +145,14 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
                     transaction.merge(LogicalDatastoreType.OPERATIONAL, path, point, true);
                     if ((fcncu.getState() != null && fcncu.getState().isLinkDown())
                             || (fcncu.getConfiguration() != null && fcncu.getConfiguration().isPORTDOWN())) {
-                        removeAffectedLinks(point.getTpId());
+                        removeAffectedLinks(point.getTpId(), transaction);
                     }
                 }
+
+                @Override
+                public String toString() {
+                    return "onNodeConnectorUpdated";
+                }
             });
         }
     }
@@ -153,6 +166,11 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
                 final InstanceIdentifier<Link> path = linkPath(link);
                 transaction.merge(LogicalDatastoreType.OPERATIONAL, path, link, true);
             }
+
+            @Override
+            public String toString() {
+                return "onLinkDiscovered";
+            }
         });
     }
 
@@ -168,6 +186,11 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
             public void applyOperation(final ReadWriteTransaction transaction) {
                 transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(toTopologyLink(notification)));
             }
+
+            @Override
+            public String toString() {
+                return "onLinkRemoved";
+            }
         });
     }
 
@@ -188,62 +211,92 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
         return tpPath(toTopologyNodeId(invNodeKey.getId()), toTerminationPointId(invNodeConnectorKey.getId()));
     }
 
-    private void removeAffectedLinks(final NodeId id) {
-        processor.enqueueOperation(new TopologyOperation() {
+    private void removeAffectedLinks(final NodeId id, final ReadWriteTransaction transaction) {
+        CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture =
+                transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
+        Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
             @Override
-            public void applyOperation(final ReadWriteTransaction transaction) {
-                CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture = transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
-                Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
-                    @Override
-                    public void onSuccess(Optional<Topology> topologyOptional) {
-                        if (topologyOptional.isPresent()) {
-                            List<Link> linkList = topologyOptional.get().getLink() != null
-                                    ? topologyOptional.get().getLink() : Collections.<Link> emptyList();
-                            for (Link link : linkList) {
-                                if (id.equals(link.getSource().getSourceNode()) || id.equals(link.getDestination().getDestNode())) {
-                                    transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
-                                }
-                            }
-                        }
-                    }
+            public void onSuccess(Optional<Topology> topologyOptional) {
+                removeAffectedLinks(id, topologyOptional);
+            }
 
-                    @Override
-                    public void onFailure(Throwable throwable) {
-                        LOG.error("Error reading topology data for topology {}", topology, throwable);
-                    }
-                });
+            @Override
+            public void onFailure(Throwable throwable) {
+                LOG.error("Error reading topology data for topology {}", topology, throwable);
             }
         });
     }
 
-    private void removeAffectedLinks(final TpId id) {
-        processor.enqueueOperation(new TopologyOperation() {
-            @Override
-            public void applyOperation(final ReadWriteTransaction transaction) {
-                CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture = transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
-                Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
-                    @Override
-                    public void onSuccess(Optional<Topology> topologyOptional) {
-                        if (topologyOptional.isPresent()) {
-                            List<Link> linkList = topologyOptional.get().getLink() != null
-                                    ? topologyOptional.get().getLink() : Collections.<Link> emptyList();
-                            for (Link link : linkList) {
-                                if (id.equals(link.getSource().getSourceTp()) || id.equals(link.getDestination().getDestTp())) {
-                                    transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
-                                }
-                            }
-                        }
-                    }
+    private void removeAffectedLinks(final NodeId id, Optional<Topology> topologyOptional) {
+        if (!topologyOptional.isPresent()) {
+            return;
+        }
+
+        List<Link> linkList = topologyOptional.get().getLink() != null ?
+                topologyOptional.get().getLink() : Collections.<Link> emptyList();
+        final List<InstanceIdentifier<Link>> linkIDsToDelete = Lists.newArrayList();
+        for (Link link : linkList) {
+            if (id.equals(link.getSource().getSourceNode()) ||
+                    id.equals(link.getDestination().getDestNode())) {
+                linkIDsToDelete.add(linkPath(link));
+            }
+        }
+
+        enqueueLinkDeletes(linkIDsToDelete);
+    }
 
-                    @Override
-                    public void onFailure(Throwable throwable) {
-                        LOG.error("Error reading topology data for topology {}", topology, throwable);
+    private void enqueueLinkDeletes(final Collection<InstanceIdentifier<Link>> linkIDsToDelete) {
+        if(!linkIDsToDelete.isEmpty()) {
+            processor.enqueueOperation(new TopologyOperation() {
+                @Override
+                public void applyOperation(ReadWriteTransaction transaction) {
+                    for(InstanceIdentifier<Link> linkID: linkIDsToDelete) {
+                        transaction.delete(LogicalDatastoreType.OPERATIONAL, linkID);
                     }
-                });
+                }
+
+                @Override
+                public String toString() {
+                    return "Delete Links " + linkIDsToDelete.size();
+                }
+            });
+        }
+    }
+
+    private void removeAffectedLinks(final TpId id, final ReadWriteTransaction transaction) {
+        CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture =
+                transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
+        Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
+            @Override
+            public void onSuccess(Optional<Topology> topologyOptional) {
+                removeAffectedLinks(id, topologyOptional);
+            }
+
+            @Override
+            public void onFailure(Throwable throwable) {
+                LOG.error("Error reading topology data for topology {}", topology, throwable);
             }
         });
     }
 
+    private void removeAffectedLinks(final TpId id, Optional<Topology> topologyOptional) {
+        if (!topologyOptional.isPresent()) {
+            return;
+        }
+
+        List<Link> linkList = topologyOptional.get().getLink() != null
+                ? topologyOptional.get().getLink() : Collections.<Link> emptyList();
+        final List<InstanceIdentifier<Link>> linkIDsToDelete = Lists.newArrayList();
+        for (Link link : linkList) {
+            if (id.equals(link.getSource().getSourceTp()) ||
+                    id.equals(link.getDestination().getDestTp())) {
+                linkIDsToDelete.add(linkPath(link));
+            }
+        }
+
+        enqueueLinkDeletes(linkIDsToDelete);
+    }
+
     private InstanceIdentifier<Node> getNodePath(final NodeId nodeId) {
         return topology.child(Node.class, new NodeKey(nodeId));
     }
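
Summary of the exporter rewrite: each notification now enqueues a single operation that performs link cleanup and its own delete on the same transaction, link paths found by the asynchronous topology read are deleted in one batched follow-up operation, and every operation names itself via toString() so the processor's logs identify what ran. The batched follow-up in enqueueLinkDeletes is equivalent to the named class sketched below (BatchedDelete is a hypothetical name for the patch's anonymous class):

    // Hypothetical named form of the anonymous operation in enqueueLinkDeletes.
    final class BatchedDelete implements TopologyOperation {
        private final Collection<InstanceIdentifier<Link>> ids;

        BatchedDelete(final Collection<InstanceIdentifier<Link>> ids) {
            this.ids = ids;
        }

        @Override
        public void applyOperation(final ReadWriteTransaction tx) {
            for (InstanceIdentifier<Link> id : ids) {
                tx.delete(LogicalDatastoreType.OPERATIONAL, id);
            }
        }

        @Override
        public String toString() {
            return "Delete Links " + ids.size();   // shown in processor logs
        }
    }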
index 1cf648eb975c521d6c81e22b927e8f276f888e99..f09da0045930cf7cc843de1a924e64841f2db508 100644 (file)
@@ -11,14 +11,17 @@ import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
+
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
+
 import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
 import org.opendaylight.controller.md.sal.binding.api.DataBroker;
 import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -50,9 +53,9 @@ final class OperationProcessor implements AutoCloseable, Runnable, TransactionCh
             for (; ; ) {
                 TopologyOperation op = queue.take();
 
-                LOG.debug("New operations available, starting transaction");
-                final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction();
+                LOG.debug("New {} operation available, starting transaction", op);
 
+                final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction();
 
                 int ops = 0;
                 do {
@@ -64,14 +67,16 @@ final class OperationProcessor implements AutoCloseable, Runnable, TransactionCh
                     } else {
                         op = null;
                     }
+
+                    LOG.debug("Next operation {}", op);
                 } while (op != null);
 
                 LOG.debug("Processed {} operations, submitting transaction", ops);
 
-                final CheckedFuture txResultFuture = tx.submit();
-                Futures.addCallback(txResultFuture, new FutureCallback() {
+                CheckedFuture<Void, TransactionCommitFailedException> txResultFuture = tx.submit();
+                Futures.addCallback(txResultFuture, new FutureCallback<Void>() {
                     @Override
-                    public void onSuccess(Object o) {
+                    public void onSuccess(Void notUsed) {
                         LOG.debug("Topology export successful for tx :{}", tx.getIdentifier());
                     }
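
For context, the loop these hunks touch batches queued operations into one transaction: block for the first operation, drain whatever else is immediately available, then submit once. In outline (MAX_TRANSACTION_OPERATIONS is an assumed name for the drain limit; the rest mirrors the code above):

    TopologyOperation op = queue.take();       // block until work arrives
    LOG.debug("New {} operation available, starting transaction", op);

    final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction();
    int ops = 0;
    do {
        op.applyOperation(tx);
        ops++;
        // non-blocking drain, bounded so one transaction cannot grow forever
        op = ops < MAX_TRANSACTION_OPERATIONS ? queue.poll() : null;
        LOG.debug("Next operation {}", op);
    } while (op != null);

    LOG.debug("Processed {} operations, submitting transaction", ops);
    tx.submit();                               // one submit per batch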
 
diff --git a/opendaylight/md-sal/topology-manager/src/test/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporterTest.java b/opendaylight/md-sal/topology-manager/src/test/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporterTest.java
new file mode 100644 (file)
index 0000000..b7a56a4
--- /dev/null
@@ -0,0 +1,666 @@
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.md.controller.topology.manager;
+
+import static org.junit.Assert.fail;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.InOrder;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkDiscoveredBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.PortConfig;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.flow.capable.port.StateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.LinkId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TpId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Destination;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.DestinationBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Source;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.SourceBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPoint;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPointKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.common.util.concurrent.Uninterruptibles;
+
+public class FlowCapableTopologyExporterTest {
+
+    @Mock
+    private DataBroker mockDataBroker;
+
+    @Mock
+    private BindingTransactionChain mockTxChain;
+
+    private OperationProcessor processor;
+
+    private FlowCapableTopologyExporter exporter;
+
+    private InstanceIdentifier<Topology> topologyIID;
+
+    private final ExecutorService executor = Executors.newFixedThreadPool(1);
+
+    @Before
+    public void setUp() {
+        MockitoAnnotations.initMocks(this);
+
+        doReturn(mockTxChain).when(mockDataBroker)
+                .createTransactionChain(any(TransactionChainListener.class));
+
+        processor = new OperationProcessor(mockDataBroker);
+
+        topologyIID = InstanceIdentifier.create(NetworkTopology.class)
+                .child(Topology.class, new TopologyKey(new TopologyId("test")));
+        exporter = new FlowCapableTopologyExporter(processor, topologyIID);
+
+        executor.execute(processor);
+    }
+
+    @After
+    public void tearDown() {
+        executor.shutdownNow();
+    }
+
+    @SuppressWarnings({ "rawtypes" })
+    @Test
+    public void testOnNodeRemoved() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                nodeKey = newInvNodeKey("node1");
+        InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+                org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+                nodeKey);
+
+        List<Link> linkList = Arrays.asList(
+                newLink("link1", newSourceNode("node1"), newDestNode("dest")),
+                newLink("link2", newSourceNode("source"), newDestNode("node1")),
+                newLink("link2", newSourceNode("source2"), newDestNode("dest2")));
+        final Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+        InstanceIdentifier[] expDeletedIIDs = {
+                topologyIID.child(Link.class, linkList.get(0).getKey()),
+                topologyIID.child(Link.class, linkList.get(1).getKey()),
+                topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+            };
+
+        SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+        ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
+        doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
+                .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+
+        CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
+
+        int expDeleteCalls = expDeletedIIDs.length;
+        CountDownLatch deleteLatch = new CountDownLatch(expDeleteCalls);
+        ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+                ArgumentCaptor.forClass(InstanceIdentifier.class);
+        setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
+
+        ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class);
+        setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch);
+        CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2);
+
+        doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build());
+
+        waitForSubmit(submitLatch1);
+
+        setReadFutureAsync(topology, readFuture);
+
+        waitForDeletes(expDeleteCalls, deleteLatch);
+
+        waitForSubmit(submitLatch2);
+
+        assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+
+        verifyMockTx(mockTx1);
+        verifyMockTx(mockTx2);
+    }
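
The remaining tests follow the pattern established above: stubbed submits and deletes count down latches, and the test blocks on those latches rather than sleeping. The body of setupStubbedDeletes lies outside this hunk; a sketch consistent with the imports used here would be:

    // Assumed implementation, inferred from usage; not shown in this patch.
    private void setupStubbedDeletes(final ReadWriteTransaction mockTx,
            final ArgumentCaptor<InstanceIdentifier> deletedIDs,
            final CountDownLatch deleteLatch) {
        doAnswer(new Answer<Void>() {
            @Override
            public Void answer(final InvocationOnMock invocation) {
                deleteLatch.countDown();
                return null;
            }
        }).when(mockTx).delete(eq(LogicalDatastoreType.OPERATIONAL),
                deletedIDs.capture());
    }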
+
+    @SuppressWarnings({ "rawtypes" })
+    @Test
+    public void testOnNodeRemovedWithNoTopology() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                nodeKey = newInvNodeKey("node1");
+        InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+                org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+                nodeKey);
+
+        InstanceIdentifier[] expDeletedIIDs = {
+                topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+            };
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        doReturn(Futures.immediateCheckedFuture(Optional.absent())).when(mockTx)
+                .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+        CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+
+        CountDownLatch deleteLatch = new CountDownLatch(1);
+        ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+                ArgumentCaptor.forClass(InstanceIdentifier.class);
+        setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build());
+
+        waitForSubmit(submitLatch);
+
+        waitForDeletes(1, deleteLatch);
+
+        assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Test
+    public void testOnNodeConnectorRemoved() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                  nodeKey = newInvNodeKey("node1");
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+                newInvNodeConnKey("tp1");
+
+        InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+        List<Link> linkList = Arrays.asList(
+                newLink("link1", newSourceTp("tp1"), newDestTp("dest")),
+                newLink("link2", newSourceTp("source"), newDestTp("tp1")),
+                newLink("link3", newSourceTp("source2"), newDestTp("dest2")));
+        final Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+        InstanceIdentifier[] expDeletedIIDs = {
+                topologyIID.child(Link.class, linkList.get(0).getKey()),
+                topologyIID.child(Link.class, linkList.get(1).getKey()),
+                topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+                        .child(TerminationPoint.class, new TerminationPointKey(new TpId("tp1")))
+            };
+
+        final SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+        ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
+        doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
+                .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+
+        CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
+
+        int expDeleteCalls = expDeletedIIDs.length;
+        CountDownLatch deleteLatch = new CountDownLatch(expDeleteCalls);
+        ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+                ArgumentCaptor.forClass(InstanceIdentifier.class);
+        setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
+
+        ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class);
+        setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch);
+        CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2);
+
+        doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef(
+                new NodeConnectorRef(invNodeConnID)).build());
+
+        waitForSubmit(submitLatch1);
+
+        setReadFutureAsync(topology, readFuture);
+
+        waitForDeletes(expDeleteCalls, deleteLatch);
+
+        waitForSubmit(submitLatch2);
+
+        assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+
+        verifyMockTx(mockTx1);
+        verifyMockTx(mockTx2);
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Test
+    public void testOnNodeConnectorRemovedWithNoTopology() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                  nodeKey = newInvNodeKey("node1");
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+                newInvNodeConnKey("tp1");
+
+        InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+        InstanceIdentifier[] expDeletedIIDs = {
+                topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+                        .child(TerminationPoint.class, new TerminationPointKey(new TpId("tp1")))
+            };
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        doReturn(Futures.immediateCheckedFuture(Optional.absent())).when(mockTx)
+                .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+        CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+
+        CountDownLatch deleteLatch = new CountDownLatch(1);
+        ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+                ArgumentCaptor.forClass(InstanceIdentifier.class);
+        setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef(
+                new NodeConnectorRef(invNodeConnID)).build());
+
+        waitForSubmit(submitLatch);
+
+        waitForDeletes(1, deleteLatch);
+
+        assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+    }
+
+    @Test
+    public void testOnNodeUpdated() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                            nodeKey = newInvNodeKey("node1");
+        InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+                org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+                nodeKey);
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeUpdated(new NodeUpdatedBuilder().setNodeRef(new NodeRef(invNodeID))
+                .setId(nodeKey.getId()).addAugmentation(FlowCapableNodeUpdated.class,
+                        new FlowCapableNodeUpdatedBuilder().build()).build());
+
+        waitForSubmit(submitLatch);
+
+        ArgumentCaptor<Node> mergedNode = ArgumentCaptor.forClass(Node.class);
+        NodeId expNodeId = new NodeId("node1");
+        verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(Node.class,
+                new NodeKey(expNodeId))), mergedNode.capture(), eq(true));
+        assertEquals("getNodeId", expNodeId, mergedNode.getValue().getNodeId());
+        InventoryNode augmentation = mergedNode.getValue().getAugmentation(InventoryNode.class);
+        assertNotNull("Missing augmentation", augmentation);
+        assertEquals("getInventoryNodeRef", new NodeRef(invNodeID), augmentation.getInventoryNodeRef());
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Test
+    public void testOnNodeConnectorUpdated() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                 nodeKey = newInvNodeKey("node1");
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+                newInvNodeConnKey("tp1");
+
+        InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+                new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+                        FlowCapableNodeConnectorUpdated.class,
+                        new FlowCapableNodeConnectorUpdatedBuilder().build()).build());
+
+        waitForSubmit(submitLatch);
+
+        ArgumentCaptor<TerminationPoint> mergedNode = ArgumentCaptor.forClass(TerminationPoint.class);
+        NodeId expNodeId = new NodeId("node1");
+        TpId expTpId = new TpId("tp1");
+        InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+                Node.class, new NodeKey(expNodeId)).child(TerminationPoint.class,
+                        new TerminationPointKey(expTpId));
+        verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+                mergedNode.capture(), eq(true));
+        assertEquals("getTpId", expTpId, mergedNode.getValue().getTpId());
+        InventoryNodeConnector augmentation = mergedNode.getValue().getAugmentation(
+                InventoryNodeConnector.class);
+        assertNotNull("Missing augmentation", augmentation);
+        assertEquals("getInventoryNodeConnectorRef", new NodeConnectorRef(invNodeConnID),
+                augmentation.getInventoryNodeConnectorRef());
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Test
+    public void testOnNodeConnectorUpdatedWithLinkStateDown() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                 nodeKey = newInvNodeKey("node1");
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+                newInvNodeConnKey("tp1");
+
+        InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+        List<Link> linkList = Arrays.asList(newLink("link1", newSourceTp("tp1"), newDestTp("dest")));
+        Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        doReturn(Futures.immediateCheckedFuture(Optional.of(topology))).when(mockTx)
+                .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+        setupStubbedSubmit(mockTx);
+
+        CountDownLatch deleteLatch = new CountDownLatch(1);
+        ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+                ArgumentCaptor.forClass(InstanceIdentifier.class);
+        setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+                new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+                        FlowCapableNodeConnectorUpdated.class,
+                        new FlowCapableNodeConnectorUpdatedBuilder().setState(
+                                new StateBuilder().setLinkDown(true).build()).build()).build());
+
+        waitForDeletes(1, deleteLatch);
+
+        InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+                Node.class, new NodeKey(new NodeId("node1"))).child(TerminationPoint.class,
+                        new TerminationPointKey(new TpId("tp1")));
+
+        verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+                any(TerminationPoint.class), eq(true));
+
+        assertDeletedIDs(new InstanceIdentifier[]{topologyIID.child(Link.class,
+                linkList.get(0).getKey())}, deletedLinkIDs);
+    }
+
+
+    @SuppressWarnings("rawtypes")
+    @Test
+    public void testOnNodeConnectorUpdatedWithPortDown() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                 nodeKey = newInvNodeKey("node1");
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+                newInvNodeConnKey("tp1");
+
+        InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+        List<Link> linkList = Arrays.asList(newLink("link1", newSourceTp("tp1"), newDestTp("dest")));
+        Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        doReturn(Futures.immediateCheckedFuture(Optional.of(topology))).when(mockTx)
+                .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+        setupStubbedSubmit(mockTx);
+
+        CountDownLatch deleteLatch = new CountDownLatch(1);
+        ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+                ArgumentCaptor.forClass(InstanceIdentifier.class);
+        setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+                new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+                        FlowCapableNodeConnectorUpdated.class,
+                        new FlowCapableNodeConnectorUpdatedBuilder().setConfiguration(
+                                new PortConfig(true, true, true, true)).build()).build());
+
+        waitForDeletes(1, deleteLatch);
+
+        InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+                Node.class, new NodeKey(new NodeId("node1"))).child(TerminationPoint.class,
+                        new TerminationPointKey(new TpId("tp1")));
+
+        verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+                any(TerminationPoint.class), eq(true));
+
+        assertDeletedIDs(new InstanceIdentifier[]{topologyIID.child(Link.class,
+                linkList.get(0).getKey())}, deletedLinkIDs);
+    }
+
+    @Test
+    public void testOnLinkDiscovered() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                sourceNodeKey = newInvNodeKey("sourceNode");
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+                sourceNodeConnKey = newInvNodeConnKey("sourceTP");
+        InstanceIdentifier<?> sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey);
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                destNodeKey = newInvNodeKey("destNode");
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+                destNodeConnKey = newInvNodeConnKey("destTP");
+        InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onLinkDiscovered(new LinkDiscoveredBuilder().setSource(
+                new NodeConnectorRef(sourceConnID)).setDestination(
+                        new NodeConnectorRef(destConnID)).build());
+
+        waitForSubmit(submitLatch);
+
+        ArgumentCaptor<Link> mergedNode = ArgumentCaptor.forClass(Link.class);
+        verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(
+                Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId())))),
+                mergedNode.capture(), eq(true));
+        assertEquals("Source node ID", "sourceNode",
+                mergedNode.getValue().getSource().getSourceNode().getValue());
+        assertEquals("Dest TP ID", "sourceTP",
+                mergedNode.getValue().getSource().getSourceTp().getValue());
+        assertEquals("Dest node ID", "destNode",
+                mergedNode.getValue().getDestination().getDestNode().getValue());
+        assertEquals("Dest TP ID", "destTP",
+                mergedNode.getValue().getDestination().getDestTp().getValue());
+    }
+
+    @Test
+    public void testOnLinkRemoved() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                sourceNodeKey = newInvNodeKey("sourceNode");
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+                sourceNodeConnKey = newInvNodeConnKey("sourceTP");
+        InstanceIdentifier<?> sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey);
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                destNodeKey = newInvNodeKey("destNode");
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+                destNodeConnKey = newInvNodeConnKey("destTP");
+        InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onLinkRemoved(new LinkRemovedBuilder().setSource(
+                new NodeConnectorRef(sourceConnID)).setDestination(
+                        new NodeConnectorRef(destConnID)).build());
+
+        waitForSubmit(submitLatch);
+
+        verify(mockTx).delete(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+                Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
+    }
+
+    private void verifyMockTx(ReadWriteTransaction mockTx) {
+        InOrder inOrder = inOrder(mockTx);
+        inOrder.verify(mockTx, atLeast(0)).submit();
+        inOrder.verify(mockTx, never()).delete(eq(LogicalDatastoreType.OPERATIONAL),
+              any(InstanceIdentifier.class));
+    }
+
+    @SuppressWarnings("rawtypes")
+    private void assertDeletedIDs(InstanceIdentifier[] expDeletedIIDs,
+            ArgumentCaptor<InstanceIdentifier> deletedLinkIDs) {
+        Set<InstanceIdentifier> actualIIDs = new HashSet<>(deletedLinkIDs.getAllValues());
+        for(InstanceIdentifier id: expDeletedIIDs) {
+            assertTrue("Missing expected deleted IID " + id, actualIIDs.contains(id));
+        }
+    }
+
+    private void setReadFutureAsync(final Topology topology,
+            final SettableFuture<Optional<Topology>> readFuture) {
+        new Thread() {
+            @Override
+            public void run() {
+                Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+                readFuture.set(Optional.of(topology));
+            }
+
+        }.start();
+    }
+
+    private void waitForSubmit(CountDownLatch latch) {
+        assertEquals("Transaction submitted", true,
+                Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS));
+    }
+
+    private void waitForDeletes(int expDeleteCalls, final CountDownLatch latch) {
+        boolean done = Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS);
+        if(!done) {
+            fail("Expected " + expDeleteCalls + " delete calls. Actual: " +
+                    (expDeleteCalls - latch.getCount()));
+        }
+    }
+
+    private CountDownLatch setupStubbedSubmit(ReadWriteTransaction mockTx) {
+        final CountDownLatch latch = new CountDownLatch(1);
+        doAnswer(new Answer<CheckedFuture<Void, TransactionCommitFailedException>>() {
+            @Override
+            public CheckedFuture<Void, TransactionCommitFailedException> answer(
+                                                            InvocationOnMock invocation) {
+                latch.countDown();
+                return Futures.immediateCheckedFuture(null);
+            }
+        }).when(mockTx).submit();
+
+        return latch;
+    }
+
+    @SuppressWarnings("rawtypes")
+    private void setupStubbedDeletes(ReadWriteTransaction mockTx,
+            ArgumentCaptor<InstanceIdentifier> deletedLinkIDs, final CountDownLatch latch) {
+        doAnswer(new Answer<Void>() {
+            @Override
+            public Void answer(InvocationOnMock invocation) {
+                latch.countDown();
+                return null;
+            }
+        }).when(mockTx).delete(eq(LogicalDatastoreType.OPERATIONAL), deletedLinkIDs.capture());
+    }
+
+    private org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                        newInvNodeKey(String id) {
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey nodeKey =
+                new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey(
+                        new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.
+                                                                      rev130819.NodeId(id));
+        return nodeKey;
+    }
+
+    private NodeConnectorKey newInvNodeConnKey(String id) {
+        return new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey(
+                new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.
+                                                               NodeConnectorId(id));
+    }
+
+    private KeyedInstanceIdentifier<NodeConnector, NodeConnectorKey> newNodeConnID(
+            org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey nodeKey,
+            org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey) {
+        return InstanceIdentifier.create(Nodes.class).child(
+                org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+                nodeKey).child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.
+                        rev130819.node.NodeConnector.class, ncKey);
+    }
+
+    private Link newLink(String id, Source source, Destination dest) {
+        return new LinkBuilder().setLinkId(new LinkId(id))
+                .setSource(source).setDestination(dest).build();
+    }
+
+    private Destination newDestTp(String id) {
+        return new DestinationBuilder().setDestTp(new TpId(id)).build();
+    }
+
+    private Source newSourceTp(String id) {
+        return new SourceBuilder().setSourceTp(new TpId(id)).build();
+    }
+
+    private Destination newDestNode(String id) {
+        return new DestinationBuilder().setDestNode(new NodeId(id)).build();
+    }
+
+    private Source newSourceNode(String id) {
+        return new SourceBuilder().setSourceNode(new NodeId(id)).build();
+    }
+}
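
Annotation: the helper methods above (setupStubbedSubmit, setupStubbedDeletes) show a reusable pattern for testing asynchronous datastore code: stub the mock so it counts down a latch when invoked, then block the test thread on the latch with a timeout instead of sleeping. A minimal self-contained sketch of the same pattern, with a hypothetical AsyncService standing in for the transaction types (names are illustrative, not from this patch):

    import static org.junit.Assert.assertTrue;
    import static org.mockito.Mockito.doAnswer;
    import static org.mockito.Mockito.mock;

    import com.google.common.util.concurrent.Uninterruptibles;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;
    import org.junit.Test;
    import org.mockito.invocation.InvocationOnMock;
    import org.mockito.stubbing.Answer;

    public class LatchStubSketch {
        // Hypothetical asynchronous collaborator, not part of this patch.
        interface AsyncService {
            void submit();
        }

        @Test
        public void submitIsEventuallyCalled() {
            AsyncService service = mock(AsyncService.class);
            final CountDownLatch latch = new CountDownLatch(1);

            // Count down whenever the code under test invokes submit()
            doAnswer(new Answer<Void>() {
                @Override
                public Void answer(InvocationOnMock invocation) {
                    latch.countDown();
                    return null;
                }
            }).when(service).submit();

            service.submit(); // stands in for triggering the async code under test

            // Fail the test if submit() never happened within the timeout
            assertTrue("submit() was not called",
                    Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS));
        }
    }

The ArgumentCaptor-based verification in the tests pairs naturally with this: the latch guarantees the call happened before the captor's values are inspected.
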
index 8720c654c61f993f09d6503305fdc471d4065a94..1d12292edb2624ee12b7d1c469c44102ab617dfc 100644 (file)
@@ -9,6 +9,7 @@ package org.opendaylight.controller.netconf.confignetconfconnector.mapping.attri
 
 import com.google.common.base.Preconditions;
 
+import com.google.common.base.Strings;
 import java.util.List;
 import java.util.Map;
 
@@ -52,7 +53,7 @@ public class ObjectNameAttributeReadingStrategy extends AbstractAttributeReading
     public static String checkPrefixAndExtractServiceName(XmlElement typeElement, Map.Entry<String, String> prefixNamespace) throws NetconfDocumentedException {
         String serviceName = typeElement.getTextContent();
         // FIXME: comparing Entry with String:
-        Preconditions.checkState(!prefixNamespace.equals(""), "Service %s value not prefixed with namespace",
+        Preconditions.checkState(!Strings.isNullOrEmpty(prefixNamespace.getKey()), "Service %s value not prefixed with namespace",
                 XmlNetconfConstants.TYPE_KEY);
         String prefix = prefixNamespace.getKey() + PREFIX_SEPARATOR;
         Preconditions.checkState(serviceName.startsWith(prefix),
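
Annotation: the original check compared a Map.Entry against the empty string; an Entry can never equal a String, so the precondition always passed and a missing prefix slipped through to the startsWith check. Guava's Strings.isNullOrEmpty validates the actual key. A minimal sketch of the corrected guard, with illustrative values (the real code extracts them from the parsed type element, and the second message is paraphrased):

    import com.google.common.base.Preconditions;
    import com.google.common.base.Strings;

    public class PrefixCheckSketch {
        public static void main(String[] args) {
            String prefix = "th";                 // namespace prefix key of the Map.Entry
            String serviceName = "th:threadpool"; // text content of the type element

            Preconditions.checkState(!Strings.isNullOrEmpty(prefix),
                    "Service %s value not prefixed with namespace", "type");
            Preconditions.checkState(serviceName.startsWith(prefix + ":"),
                    "Service value %s does not start with prefix %s", serviceName, prefix);
        }
    }
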
index 8e7ba708c6cea71ba46400f99fb765a2113bf762..94a90a782ebbd82e5a35643d27e15f9607248537 100644 (file)
@@ -47,11 +47,11 @@ public class SimpleIdentityRefAttributeReadingStrategy extends SimpleAttributeRe
         Date revision = null;
         Map<Date, EditConfig.IdentityMapping> revisions = identityMap.get(namespace);
         if(revisions.keySet().size() > 1) {
-            for (Date date : revisions.keySet()) {
-                if(revisions.get(date).containsIdName(localName)) {
+            for (Map.Entry<Date, EditConfig.IdentityMapping> revisionToIdentityEntry : revisions.entrySet()) {
+                if(revisionToIdentityEntry.getValue().containsIdName(localName)) {
                     Preconditions.checkState(revision == null, "Duplicate identity %s, in namespace %s, with revisions: %s, %s detected. Cannot map attribute",
-                            localName, namespace, revision, date);
-                    revision = date;
+                            localName, namespace, revision, revisionToIdentityEntry.getKey());
+                    revision = revisionToIdentityEntry.getKey();
                 }
             }
         } else {
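
Annotation: this hunk is the first of the recurring refactor in this patch (it repeats below in Config, ServiceRegistryWrapper, Services, EditConfig, Get, and RuntimeRpc): iterating keySet() and calling get(key) inside the loop performs a second lookup per element, while entrySet() yields both key and value in a single pass. A generic before/after sketch:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class EntrySetSketch {
        public static void main(String[] args) {
            Map<String, Integer> counts = new LinkedHashMap<>();
            counts.put("a", 1);
            counts.put("b", 2);

            // Before: two lookups per element (iteration over keys, then get per key)
            for (String key : counts.keySet()) {
                System.out.println(key + "=" + counts.get(key));
            }

            // After: a single pass over the entries
            for (Map.Entry<String, Integer> entry : counts.entrySet()) {
                System.out.println(entry.getKey() + "=" + entry.getValue());
            }
        }
    }
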
index 5d41b784f5cdaa4ff264774b05069ad6e83f3ff3..773e4ee933c147d6206394b8da51fd4f8094358f 100644 (file)
@@ -60,11 +60,11 @@ public class Config {
 
         Map<String, Map<String, Collection<ObjectName>>> retVal = Maps.newLinkedHashMap();
 
-        for (String namespace : configs.keySet()) {
+        for (Entry<String, Map<String, ModuleConfig>> namespaceToModuleToConfigEntry : configs.entrySet()) {
 
             Map<String, Collection<ObjectName>> innerRetVal = Maps.newHashMap();
 
-            for (Entry<String, ModuleConfig> mbeEntry : configs.get(namespace).entrySet()) {
+            for (Entry<String, ModuleConfig> mbeEntry : namespaceToModuleToConfigEntry.getValue().entrySet()) {
 
                 String moduleName = mbeEntry.getKey();
                 Collection<ObjectName> instances = moduleToInstances.get(moduleName);
@@ -80,7 +80,7 @@ public class Config {
 
             }
 
-            retVal.put(namespace, innerRetVal);
+            retVal.put(namespaceToModuleToConfigEntry.getKey(), innerRetVal);
         }
         return retVal;
     }
@@ -107,18 +107,18 @@ public class Config {
 
         Element modulesElement = XmlUtil.createElement(document, XmlNetconfConstants.MODULES_KEY, Optional.of(XmlNetconfConstants.URN_OPENDAYLIGHT_PARAMS_XML_NS_YANG_CONTROLLER_CONFIG));
         dataElement.appendChild(modulesElement);
-        for (String moduleNamespace : moduleToInstances.keySet()) {
-            for (Entry<String, Collection<ObjectName>> moduleMappingEntry : moduleToInstances.get(moduleNamespace)
+        for (Entry<String, Map<String, Collection<ObjectName>>> moduleToInstanceEntry : moduleToInstances.entrySet()) {
+            for (Entry<String, Collection<ObjectName>> moduleMappingEntry : moduleToInstanceEntry.getValue()
                     .entrySet()) {
 
-                ModuleConfig mapping = moduleConfigs.get(moduleNamespace).get(moduleMappingEntry.getKey());
+                ModuleConfig mapping = moduleConfigs.get(moduleToInstanceEntry.getKey()).get(moduleMappingEntry.getKey());
 
                 if (moduleMappingEntry.getValue().isEmpty()) {
                     continue;
                 }
 
                 for (ObjectName objectName : moduleMappingEntry.getValue()) {
-                    modulesElement.appendChild(mapping.toXml(objectName, document, moduleNamespace));
+                    modulesElement.appendChild(mapping.toXml(objectName, document, moduleToInstanceEntry.getKey()));
                 }
 
             }
index 2b363ea153960eeb98754855ffa54473e43fad14..8c2c74f2ac58d1c2144a1e8194cb46e15eb279b9 100644 (file)
@@ -24,7 +24,6 @@ public class ServiceRegistryWrapper {
         this.configServiceRefRegistry = configServiceRefRegistry;
     }
 
-
     public ObjectName getByServiceAndRefName(String namespace, String serviceName, String refName) {
         Map<String, Map<String, String>> serviceNameToRefNameToInstance = getMappedServices().get(namespace);
 
@@ -61,13 +60,13 @@ public class ServiceRegistryWrapper {
         Map<String, Map<String, Map<String, String>>> retVal = Maps.newHashMap();
 
         Map<String, Map<String, ObjectName>> serviceMapping = configServiceRefRegistry.getServiceMapping();
-        for (String serviceQName : serviceMapping.keySet()){
-            for (String refName : serviceMapping.get(serviceQName).keySet()) {
+        for (Map.Entry<String, Map<String, ObjectName>> qNameToRefNameEntry : serviceMapping.entrySet()){
+            for (String refName : qNameToRefNameEntry.getValue().keySet()) {
 
-                ObjectName on = serviceMapping.get(serviceQName).get(refName);
+                ObjectName on = qNameToRefNameEntry.getValue().get(refName);
                 Services.ServiceInstance si = Services.ServiceInstance.fromObjectName(on);
 
-                QName qname = QName.create(serviceQName);
+                QName qname = QName.create(qNameToRefNameEntry.getKey());
                 String namespace = qname.getNamespace().toString();
                 Map<String, Map<String, String>> serviceToRefs = retVal.get(namespace);
                 if(serviceToRefs==null) {
index 59a1d4fe7141bbe54a2dc770a4d2c789819bf52d..bdb4c1b067ff19947a01484165ba25563ba53e0a 100644 (file)
@@ -133,9 +133,9 @@ public final class Services {
         Element root = XmlUtil.createElement(document, XmlNetconfConstants.SERVICES_KEY, Optional.of(XmlNetconfConstants.URN_OPENDAYLIGHT_PARAMS_XML_NS_YANG_CONTROLLER_CONFIG));
 
         Map<String, Map<String, Map<String, String>>> mappedServices = serviceRegistryWrapper.getMappedServices();
-        for (String namespace : mappedServices.keySet()) {
+        for (Entry<String, Map<String, Map<String, String>>> namespaceToRefEntry : mappedServices.entrySet()) {
 
-            for (Entry<String, Map<String, String>> serviceEntry : mappedServices.get(namespace).entrySet()) {
+            for (Entry<String, Map<String, String>> serviceEntry : namespaceToRefEntry.getValue().entrySet()) {
                 // service belongs to config.yang namespace
                 Element serviceElement = XmlUtil.createElement(document, SERVICE_KEY, Optional.<String>absent());
                 root.appendChild(serviceElement);
@@ -143,7 +143,7 @@ public final class Services {
                 // type belongs to config.yang namespace
                 String serviceType = serviceEntry.getKey();
                 Element typeElement = XmlUtil.createTextElementWithNamespacedContent(document, XmlNetconfConstants.TYPE_KEY,
-                        XmlNetconfConstants.PREFIX, namespace, serviceType);
+                        XmlNetconfConstants.PREFIX, namespaceToRefEntry.getKey(), serviceType);
 
                 serviceElement.appendChild(typeElement);
 
index 543a2c4a630d90de73dbf7aa0f837bf7dd2328f7..c22dcfe67bfb00233758b3b05cdc557ae5935a32 100644 (file)
@@ -148,21 +148,20 @@ public class EditConfig extends AbstractConfigNetconfOperation {
         Map<String, Map<String, Map<String, Services.ServiceInstance>>> namespaceToServiceNameToRefNameToInstance = services
                 .getNamespaceToServiceNameToRefNameToInstance();
 
-        for (String serviceNamespace : namespaceToServiceNameToRefNameToInstance.keySet()) {
-            for (String serviceName : namespaceToServiceNameToRefNameToInstance.get(serviceNamespace).keySet()) {
+        for (Map.Entry<String, Map<String, Map<String, Services.ServiceInstance>>> namespaceToServiceToRefEntry : namespaceToServiceNameToRefNameToInstance.entrySet()) {
+            for (Map.Entry<String, Map<String, Services.ServiceInstance>> serviceToRefEntry : namespaceToServiceToRefEntry.getValue().entrySet()) {
 
-                String qnameOfService = getQname(ta, serviceNamespace, serviceName);
-                Map<String, Services.ServiceInstance> refNameToInstance = namespaceToServiceNameToRefNameToInstance
-                        .get(serviceNamespace).get(serviceName);
+                String qnameOfService = getQname(ta, namespaceToServiceToRefEntry.getKey(), serviceToRefEntry.getKey());
+                Map<String, Services.ServiceInstance> refNameToInstance = serviceToRefEntry.getValue();
 
-                for (String refName : refNameToInstance.keySet()) {
-                    ObjectName on = refNameToInstance.get(refName).getObjectName(ta.getTransactionName());
+                for (Map.Entry<String, Services.ServiceInstance> refNameToServiceEntry : refNameToInstance.entrySet()) {
+                    ObjectName on = refNameToServiceEntry.getValue().getObjectName(ta.getTransactionName());
                     try {
-                        ObjectName saved = ta.saveServiceReference(qnameOfService, refName, on);
+                        ObjectName saved = ta.saveServiceReference(qnameOfService, refNameToServiceEntry.getKey(), on);
                         logger.debug("Saving service {} with on {} under name {} with service on {}", qnameOfService,
-                                on, refName, saved);
+                                on, refNameToServiceEntry.getKey(), saved);
                     } catch (InstanceNotFoundException e) {
-                        throw new NetconfDocumentedException(String.format("Unable to save ref name " + refName + " for instance " + on, e),
+                        throw new NetconfDocumentedException(String.format("Unable to save ref name " + refNameToServiceEntry.getKey() + " for instance " + on, e),
                                 ErrorType.application,
                                 ErrorTag.operation_failed,
                                 ErrorSeverity.error);
@@ -271,18 +270,18 @@ public class EditConfig extends AbstractConfigNetconfOperation {
 
         Map<String, Map<String, ModuleConfig>> namespaceToModuleNameToModuleConfig = Maps.newHashMap();
 
-        for (String namespace : mBeanEntries.keySet()) {
-            for (Map.Entry<String, ModuleMXBeanEntry> moduleNameToMbe : mBeanEntries.get(namespace).entrySet()) {
+        for (Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleToMbe : mBeanEntries.entrySet()) {
+            for (Map.Entry<String, ModuleMXBeanEntry> moduleNameToMbe : namespaceToModuleToMbe.getValue().entrySet()) {
                 String moduleName = moduleNameToMbe.getKey();
                 ModuleMXBeanEntry moduleMXBeanEntry = moduleNameToMbe.getValue();
 
                 ModuleConfig moduleConfig = new ModuleConfig(moduleName,
                         new InstanceConfig(configRegistryClient,moduleMXBeanEntry.getAttributes(), moduleMXBeanEntry.getNullableDummyContainerName()));
 
-                Map<String, ModuleConfig> moduleNameToModuleConfig = namespaceToModuleNameToModuleConfig.get(namespace);
+                Map<String, ModuleConfig> moduleNameToModuleConfig = namespaceToModuleNameToModuleConfig.get(namespaceToModuleToMbe.getKey());
                 if(moduleNameToModuleConfig == null) {
                     moduleNameToModuleConfig = Maps.newHashMap();
-                    namespaceToModuleNameToModuleConfig.put(namespace, moduleNameToModuleConfig);
+                    namespaceToModuleNameToModuleConfig.put(namespaceToModuleToMbe.getKey(), moduleNameToModuleConfig);
                 }
 
                 moduleNameToModuleConfig.put(moduleName, moduleConfig);
index fc95046dfdbef94e3e139480cae950c7010a83f9..9155bb95d2e73e812c46c15b3ff973cabb92875f 100644 (file)
@@ -54,13 +54,13 @@ public class Get extends AbstractConfigNetconfOperation {
             Map<String, Map<String, ModuleMXBeanEntry>> mBeanEntries) {
         Map<String, Map<String, ModuleRuntime>> retVal = Maps.newHashMap();
 
-        for (String namespace : mBeanEntries.keySet()) {
+        for (Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleEntry : mBeanEntries.entrySet()) {
 
             Map<String, ModuleRuntime> innerMap = Maps.newHashMap();
-            Map<String, ModuleMXBeanEntry> entriesFromNamespace = mBeanEntries.get(namespace);
-            for (String module : entriesFromNamespace.keySet()) {
+            Map<String, ModuleMXBeanEntry> entriesFromNamespace = namespaceToModuleEntry.getValue();
+            for (Map.Entry<String, ModuleMXBeanEntry> moduleToMXEntry : entriesFromNamespace.entrySet()) {
 
-                ModuleMXBeanEntry mbe = entriesFromNamespace.get(module);
+                ModuleMXBeanEntry mbe = moduleToMXEntry.getValue();
 
                 Map<RuntimeBeanEntry, InstanceConfig> cache = Maps.newHashMap();
                 RuntimeBeanEntry root = null;
@@ -77,10 +77,10 @@ public class Get extends AbstractConfigNetconfOperation {
 
                 InstanceRuntime rootInstanceRuntime = createInstanceRuntime(root, cache);
                 ModuleRuntime moduleRuntime = new ModuleRuntime(rootInstanceRuntime);
-                innerMap.put(module, moduleRuntime);
+                innerMap.put(moduleToMXEntry.getKey(), moduleRuntime);
             }
 
-            retVal.put(namespace, innerMap);
+            retVal.put(namespaceToModuleEntry.getKey(), innerMap);
         }
         return retVal;
     }
index 21021fec55661f69cd2c1e19325f0dd651e07193..0d9c61bad7b17b961294ce3562a7abca56f4d851 100644 (file)
@@ -81,8 +81,7 @@ public class RuntimeRpc extends AbstractConfigNetconfOperation {
         final String[] signature = new String[attributes.size()];
 
         int i = 0;
-        for (final String attrName : attributes.keySet()) {
-            final AttributeConfigElement attribute = attributes.get(attrName);
+        for (final AttributeConfigElement attribute : attributes.values()) {
             final Optional<?> resolvedValueOpt = attribute.getResolvedValue();
 
             params[i] = resolvedValueOpt.isPresent() ? resolvedValueOpt.get() : attribute.getResolvedDefaultValue();
@@ -248,23 +247,23 @@ public class RuntimeRpc extends AbstractConfigNetconfOperation {
 
         final Map<String, Map<String, ModuleRpcs>> map = Maps.newHashMap();
 
-        for (final String namespace : mBeanEntries.keySet()) {
+        for (final Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleEntry : mBeanEntries.entrySet()) {
 
-            Map<String, ModuleRpcs> namespaceToModules = map.get(namespace);
+            Map<String, ModuleRpcs> namespaceToModules = map.get(namespaceToModuleEntry.getKey());
             if (namespaceToModules == null) {
                 namespaceToModules = Maps.newHashMap();
-                map.put(namespace, namespaceToModules);
+                map.put(namespaceToModuleEntry.getKey(), namespaceToModules);
             }
 
-            for (final String moduleName : mBeanEntries.get(namespace).keySet()) {
+            for (final Map.Entry<String, ModuleMXBeanEntry> moduleEntry : namespaceToModuleEntry.getValue().entrySet()) {
 
-                ModuleRpcs rpcMapping = namespaceToModules.get(moduleName);
+                ModuleRpcs rpcMapping = namespaceToModules.get(moduleEntry.getKey());
                 if (rpcMapping == null) {
                     rpcMapping = new ModuleRpcs();
-                    namespaceToModules.put(moduleName, rpcMapping);
+                    namespaceToModules.put(moduleEntry.getKey(), rpcMapping);
                 }
 
-                final ModuleMXBeanEntry entry = mBeanEntries.get(namespace).get(moduleName);
+                final ModuleMXBeanEntry entry = moduleEntry.getValue();
 
                 for (final RuntimeBeanEntry runtimeEntry : entry.getRuntimeBeans()) {
                     rpcMapping.addNameMapping(runtimeEntry);
index beab62e997457055ce56ddbf646d40292905bf6a..11d2e748bfff0f2c3f4060e957a61b06d5e7e624 100644 (file)
@@ -55,7 +55,7 @@ public class TransactionProvider implements AutoCloseable {
         allOpenedTransactions.clear();
     }
 
-    public Optional<ObjectName> getTransaction() {
+    public synchronized Optional<ObjectName> getTransaction() {
 
         if (transaction == null){
             return Optional.absent();
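
Annotation: making getTransaction() synchronized gives readers of the transaction field a happens-before edge with the writers, so a concurrent caller cannot observe a stale value. A minimal sketch of the shape, assuming (as the hunk suggests but does not show) that the field is only written under the same monitor elsewhere in TransactionProvider:

    import com.google.common.base.Optional;
    import javax.management.ObjectName;

    public class TransactionHolderSketch {
        private ObjectName transaction;  // guarded by "this"

        public synchronized Optional<ObjectName> getTransaction() {
            return transaction == null
                    ? Optional.<ObjectName>absent() : Optional.of(transaction);
        }

        public synchronized void setTransaction(final ObjectName tx) {
            transaction = tx;
        }
    }
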
index 5f311b5232ed676263925ce6af94b779c662df2e..b346522f4498b587c5550f7898f6a9639b48f395 100644 (file)
@@ -10,6 +10,9 @@ package org.opendaylight.controller.netconf.persist.impl;
 
 import static com.google.common.base.Preconditions.checkNotNull;
 
+import com.google.common.base.Function;
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.Collections2;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.Collection;
@@ -23,10 +26,9 @@ import java.util.TreeMap;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
-
+import javax.annotation.Nonnull;
 import javax.annotation.concurrent.Immutable;
 import javax.management.MBeanServerConnection;
-
 import org.opendaylight.controller.config.api.ConflictingVersionException;
 import org.opendaylight.controller.config.persist.api.ConfigPusher;
 import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
@@ -49,10 +51,6 @@ import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.xml.sax.SAXException;
 
-import com.google.common.base.Function;
-import com.google.common.base.Stopwatch;
-import com.google.common.collect.Collections2;
-
 @Immutable
 public class ConfigPusherImpl implements ConfigPusher {
     private static final Logger logger = LoggerFactory.getLogger(ConfigPusherImpl.class);
@@ -200,7 +198,7 @@ public class ConfigPusherImpl implements ConfigPusher {
     private static Set<String> computeNotFoundCapabilities(Set<String> expectedCapabilities, NetconfOperationService serviceCandidate) {
         Collection<String> actual = Collections2.transform(serviceCandidate.getCapabilities(), new Function<Capability, String>() {
             @Override
-            public String apply(Capability input) {
+            public String apply(@Nonnull final Capability input) {
                 return input.getCapabilityUri();
             }
         });
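
Annotation: Collections2.transform returns a lazy live view, so the Function runs each time the result is iterated, and apply() immediately dereferences its argument; the switch to @Nonnull (here and in the monitoring hunks below) documents that contract instead of the misleading @Nullable. A small illustrative sketch:

    import com.google.common.base.Function;
    import com.google.common.collect.Collections2;
    import java.util.Arrays;
    import java.util.Collection;
    import javax.annotation.Nonnull;

    public class TransformSketch {
        public static void main(String[] args) {
            Collection<String> uris = Collections2.transform(
                    Arrays.asList("cap-a", "cap-b"),
                    new Function<String, String>() {
                        @Override
                        public String apply(@Nonnull final String input) {
                            // runs lazily, each time the view is iterated
                            return "urn:" + input;
                        }
                    });
            System.out.println(uris);  // [urn:cap-a, urn:cap-b]
        }
    }
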
index 81fac5f12f9c7dc9d3bf40922c19723836c036ce..2a45e1757b8655a6b6be8bcf097867b677fd70d1 100644 (file)
@@ -13,6 +13,10 @@ import com.google.common.collect.Collections2;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 import io.netty.util.internal.ConcurrentSet;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import javax.annotation.Nonnull;
 import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
 import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
 import org.opendaylight.controller.netconf.mapping.api.Capability;
@@ -32,11 +36,6 @@ import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.mon
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.annotation.Nullable;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
 public class NetconfMonitoringServiceImpl implements NetconfMonitoringService, SessionMonitoringService {
 
     private static final Logger logger = LoggerFactory.getLogger(NetconfMonitoringServiceImpl.class);
@@ -134,9 +133,8 @@ public class NetconfMonitoringServiceImpl implements NetconfMonitoringService, S
 
     private List<Session> transformSessions(Set<NetconfManagementSession> sessions) {
         return Lists.newArrayList(Collections2.transform(sessions, new Function<NetconfManagementSession, Session>() {
-            @Nullable
             @Override
-            public Session apply(@Nullable NetconfManagementSession input) {
+            public Session apply(@Nonnull NetconfManagementSession input) {
                 return input.toManagementSession();
             }
         }));
index ff96ad779fbc974539ced455494129381bb09635..aa590604b0087d5f7cc79f135a246b6a929abe07 100644 (file)
@@ -48,7 +48,7 @@ public class NetconfOperationRouterImpl implements NetconfOperationRouter {
         this.netconfOperationServiceSnapshot = netconfOperationServiceSnapshot;
     }
 
-    private void initNetconfOperations(Set<NetconfOperation> allOperations) {
+    private synchronized void initNetconfOperations(Set<NetconfOperation> allOperations) {
         allNetconfOperations = allOperations;
     }
 
index 272b686fc034ff33fbbe247b706ec536040660cb..3a70a399bb8f82f563eb5e9f7eaa252c9f28eeb0 100644 (file)
       <artifactId>config-util</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>${project.groupId}</groupId>
+      <artifactId>sal-netconf-connector</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>${project.groupId}</groupId>
       <artifactId>netconf-api</artifactId>
index 4fe5f2a9504c60b8a448ff58feadbecb557799b3..1adcd7e49176e65cc15c5fb558015e5e6e129205 100644 (file)
@@ -10,27 +10,35 @@ package org.opendaylight.controller.netconf.it;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
 import io.netty.channel.local.LocalAddress;
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.GenericFutureListener;
 import io.netty.util.concurrent.GlobalEventExecutor;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
 import org.opendaylight.controller.netconf.api.NetconfMessage;
 import org.opendaylight.controller.netconf.auth.AuthProvider;
 import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
 import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
 import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
 import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
 import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
@@ -42,7 +50,15 @@ import org.opendaylight.controller.netconf.ssh.authentication.PEMGenerator;
 import org.opendaylight.controller.netconf.util.messages.NetconfMessageUtil;
 import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
 import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.api.RemoteDevice;
+import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
 import org.opendaylight.protocol.framework.NeverReconnectStrategy;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.xml.sax.SAXException;
 
 public class NetconfITSecureTest extends AbstractNetconfConfigTest {
 
@@ -70,7 +86,7 @@ public class NetconfITSecureTest extends AbstractNetconfConfigTest {
     @Test
     public void testSecure() throws Exception {
         final NetconfClientDispatcher dispatch = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
-        try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration())) {
+        try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration(new SimpleNetconfClientSessionListener()))) {
             NetconfMessage response = netconfClient.sendMessage(getGetConfig());
             assertFalse("Unexpected error message " + XmlUtil.toString(response.getDocument()),
                     NetconfMessageUtil.isErrorMessage(response));
@@ -91,29 +107,42 @@ public class NetconfITSecureTest extends AbstractNetconfConfigTest {
     /**
      * Test all requests are handled properly and no mismatch occurs in listener
      */
-    @Test(timeout = 3*60*1000)
+    @Test(timeout = 6*60*1000)
     public void testSecureStress() throws Exception {
+        final int requests = 4000;
+
         final NetconfClientDispatcher dispatch = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
-        try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration())) {
+        final NetconfDeviceCommunicator sessionListener = getSessionListener();
+        try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration(sessionListener))) {
 
             final AtomicInteger responseCounter = new AtomicInteger(0);
-            final List<Future<?>> futures = Lists.newArrayList();
+            final List<ListenableFuture<RpcResult<NetconfMessage>>> futures = Lists.newArrayList();
 
-            final int requests = 1000;
             for (int i = 0; i < requests; i++) {
-                final Future<NetconfMessage> netconfMessageFuture = netconfClient.sendRequest(getGetConfig());
+                NetconfMessage getConfig = getGetConfig();
+                getConfig = changeMessageId(getConfig, i);
+                final ListenableFuture<RpcResult<NetconfMessage>> netconfMessageFuture = sessionListener.sendRequest(getConfig, QName.create("namespace", "2012-12-12", "get"));
                 futures.add(netconfMessageFuture);
-                netconfMessageFuture.addListener(new GenericFutureListener<Future<? super NetconfMessage>>() {
+                Futures.addCallback(netconfMessageFuture, new FutureCallback<RpcResult<NetconfMessage>>() {
                     @Override
-                    public void operationComplete(final Future<? super NetconfMessage> future) throws Exception {
-                        assertTrue("Request unsuccessful " + future.cause(), future.isSuccess());
+                    public void onSuccess(final RpcResult<NetconfMessage> result) {
                         responseCounter.incrementAndGet();
                     }
+
+                    @Override
+                    public void onFailure(final Throwable t) {
+                        throw new RuntimeException(t);
+                    }
                 });
             }
 
-            for (final Future<?> future : futures) {
-                future.await();
+            // Wait for every future
+            for (final ListenableFuture<RpcResult<NetconfMessage>> future : futures) {
+                try {
+                    future.get(3, TimeUnit.MINUTES);
+                } catch (final TimeoutException e) {
+                    fail("Request " + futures.indexOf(future) + " is not responding");
+                }
             }
 
             // Give future listeners some time to finish counter incrementation
@@ -123,10 +152,17 @@ public class NetconfITSecureTest extends AbstractNetconfConfigTest {
         }
     }
 
-    public NetconfClientConfiguration getClientConfiguration() throws IOException {
+    private NetconfMessage changeMessageId(final NetconfMessage getConfig, final int i) throws IOException, SAXException {
+        String s = XmlUtil.toString(getConfig.getDocument(), false);
+        s = s.replace("101", Integer.toString(i));
+        return new NetconfMessage(XmlUtil.readXmlToDocument(s));
+    }
+
+    public NetconfClientConfiguration getClientConfiguration(final NetconfClientSessionListener sessionListener) throws IOException {
         final NetconfClientConfigurationBuilder b = NetconfClientConfigurationBuilder.create();
         b.withAddress(TLS_ADDRESS);
-        b.withSessionListener(new SimpleNetconfClientSessionListener());
+        // Use the session listener from sal-netconf-connector since the stress test cannot be performed with the simple listener
+        b.withSessionListener(sessionListener);
         b.withReconnectStrategy(new NeverReconnectStrategy(GlobalEventExecutor.INSTANCE, 5000));
         b.withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH);
         b.withConnectionTimeoutMillis(5000);
@@ -134,6 +170,16 @@ public class NetconfITSecureTest extends AbstractNetconfConfigTest {
         return b.build();
     }
 
+    @Mock
+    private RemoteDevice<NetconfSessionCapabilities, NetconfMessage> mockedRemoteDevice;
+
+    private NetconfDeviceCommunicator getSessionListener() {
+        MockitoAnnotations.initMocks(this);
+        doNothing().when(mockedRemoteDevice).onRemoteSessionUp(any(NetconfSessionCapabilities.class), any(RemoteDeviceCommunicator.class));
+        doNothing().when(mockedRemoteDevice).onRemoteSessionDown();
+        return new NetconfDeviceCommunicator(new RemoteDeviceId("secure-test"), mockedRemoteDevice);
+    }
+
     public AuthProvider getAuthProvider() throws Exception {
         final AuthProvider mockAuth = mock(AuthProvider.class);
         doReturn("mockedAuth").when(mockAuth).toString();
index c5037d34ed4ac01fcea4b5dd264b8f715ad2cc9c..91fb805e6ab3f00a14096faea945eb0f4bad97b0 100644 (file)
@@ -7,6 +7,7 @@
     </appender>
 
   <logger name="org.opendaylight.controller.netconf" level="TRACE"/>
+  <logger name="org.opendaylight.controller.sal.connect.netconf" level="TRACE"/>
 
   <root level="error">
     <appender-ref ref="STDOUT" />
index 078509db40f1c71d511bf5881e75b79e75e0f82c..16b38eca5172a96a6254713dbefb6dbd44324949 100644 (file)
@@ -11,6 +11,7 @@ package org.opendaylight.controller.netconf.monitoring.xml.model;
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Collections2;
+import javax.annotation.Nonnull;
 import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.Yang;
 import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.schemas.Schema;
 
@@ -41,7 +42,7 @@ final class MonitoringSchema {
         return Collections2.transform(schema.getLocation(), new Function<Schema.Location, String>() {
             @Nullable
             @Override
-            public String apply(@Nullable Schema.Location input) {
+            public String apply(@Nonnull Schema.Location input) {
                 return input.getEnumeration().toString();
             }
         });
index 08441b4ce5308f0bb85bfe606177f40bbfcc4f29..4b5dcd7d5578941523d0b76eb6f88aa97c353b13 100644 (file)
@@ -69,11 +69,11 @@ public class JaxBSerializerTest {
                 "<session-id>1</session-id>" +
                 "<in-bad-rpcs>0</in-bad-rpcs>" +
                 "<in-rpcs>0</in-rpcs>" +
-                "<login-time>loginTime</login-time>" +
+                "<login-time>2010-10-10T12:32:32Z</login-time>" +
                 "<out-notifications>0</out-notifications>" +
                 "<out-rpc-errors>0</out-rpc-errors>" +
                 "<ncme:session-identifier>client</ncme:session-identifier>" +
-                "<source-host>address/port</source-host>" +
+                "<source-host>192.168.1.1</source-host>" +
                 "<transport>ncme:netconf-tcp</transport>" +
                 "<username>username</username>" +
                 "</session>"));
@@ -96,8 +96,8 @@ public class JaxBSerializerTest {
         final Session1 mockedSession1 = mock(Session1.class);
         doReturn("client").when(mockedSession1).getSessionIdentifier();
         doReturn(1L).when(mocked).getSessionId();
-        doReturn(new DateAndTime("loginTime")).when(mocked).getLoginTime();
-        doReturn(new Host(new DomainName("address/port"))).when(mocked).getSourceHost();
+        doReturn(new DateAndTime("2010-10-10T12:32:32Z")).when(mocked).getLoginTime();
+        doReturn(new Host(new DomainName("192.168.1.1"))).when(mocked).getSourceHost();
         doReturn(new ZeroBasedCounter32(0L)).when(mocked).getInBadRpcs();
         doReturn(new ZeroBasedCounter32(0L)).when(mocked).getInRpcs();
         doReturn(new ZeroBasedCounter32(0L)).when(mocked).getOutNotifications();
diff --git a/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHanderReader.java b/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHanderReader.java
new file mode 100644 (file)
index 0000000..73a24f2
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
+
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelOutboundHandler;
+import org.apache.sshd.common.future.SshFutureListener;
+import org.apache.sshd.common.io.IoInputStream;
+import org.apache.sshd.common.io.IoReadFuture;
+import org.apache.sshd.common.util.Buffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Listener on async input stream from SSH session.
+ * This listener schedules reads in a loop until the session is closed or a read fails.
+ */
+final class AsyncSshHanderReader implements SshFutureListener<IoReadFuture>, AutoCloseable {
+
+    private static final Logger logger = LoggerFactory.getLogger(AsyncSshHandler.class);
+
+    private static final int BUFFER_SIZE = 8192;
+
+    private final ChannelOutboundHandler asyncSshHandler;
+    private final ChannelHandlerContext ctx;
+
+    private IoInputStream asyncOut;
+    private Buffer buf;
+    private IoReadFuture currentReadFuture;
+
+    public AsyncSshHanderReader(final ChannelOutboundHandler asyncSshHandler, final ChannelHandlerContext ctx, final IoInputStream asyncOut) {
+        this.asyncSshHandler = asyncSshHandler;
+        this.ctx = ctx;
+        this.asyncOut = asyncOut;
+        buf = new Buffer(BUFFER_SIZE);
+        asyncOut.read(buf).addListener(this);
+    }
+
+    @Override
+    public synchronized void operationComplete(final IoReadFuture future) {
+        if(future.getException() != null) {
+            if(asyncOut.isClosed() || asyncOut.isClosing()) {
+                // Ssh dropped
+                logger.debug("Ssh session dropped on channel: {}", ctx.channel(), future.getException());
+            } else {
+                logger.warn("Exception while reading from SSH remote on channel {}", ctx.channel(), future.getException());
+            }
+            invokeDisconnect();
+            return;
+        }
+
+        if (future.getRead() > 0) {
+            ctx.fireChannelRead(Unpooled.wrappedBuffer(buf.array(), 0, future.getRead()));
+
+            // Schedule next read
+            buf = new Buffer(BUFFER_SIZE);
+            currentReadFuture = asyncOut.read(buf);
+            currentReadFuture.addListener(this);
+        }
+    }
+
+    private void invokeDisconnect() {
+        try {
+            asyncSshHandler.disconnect(ctx, ctx.newPromise());
+        } catch (final Exception e) {
+            // This should not happen
+            throw new IllegalStateException(e);
+        }
+    }
+
+    @Override
+    public synchronized void close() {
+        // Remove self as listener on close to prevent reading from closed input
+        if(currentReadFuture != null) {
+            currentReadFuture.removeListener(this);
+        }
+
+        asyncOut = null;
+    }
+}
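
Annotation: the class extracted above is a self-rescheduling listener: the constructor issues the first read and registers the object as its own completion listener, each successful completion delivers the bytes downstream and immediately queues the next read, and close() deregisters so the loop stops. A minimal sketch of that shape, with the Apache SSHD types (IoInputStream, IoReadFuture) replaced by hypothetical stand-ins:

    // Hypothetical stand-ins for the SSHD async-read API, not from this patch.
    interface ReadCallback {
        void onRead(byte[] data, Throwable failure);
    }

    interface ByteSource {
        void read(ReadCallback callback);  // asynchronous, completes once
    }

    final class ReadLoopSketch implements ReadCallback {
        private final ByteSource source;
        private volatile boolean closed;

        ReadLoopSketch(final ByteSource source) {
            this.source = source;
            source.read(this);             // schedule the first read
        }

        @Override
        public void onRead(final byte[] data, final Throwable failure) {
            if (closed || failure != null) {
                return;                    // stop the loop on close or error
            }
            // deliver data downstream (ctx.fireChannelRead in the real handler)
            source.read(this);             // re-register for the next read
        }

        void close() {
            closed = true;                 // the next completion becomes a no-op
        }
    }
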
index 369c013832790eef19dc2b751baa6a9564bb7800..3bd72320232bb7f912a316469b816fa76952f0c4 100644 (file)
@@ -9,10 +9,7 @@
 package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
 
 import com.google.common.base.Preconditions;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.Unpooled;
 import io.netty.channel.ChannelHandlerContext;
-import io.netty.channel.ChannelOutboundHandler;
 import io.netty.channel.ChannelOutboundHandlerAdapter;
 import io.netty.channel.ChannelPromise;
 import java.io.IOException;
@@ -25,12 +22,6 @@ import org.apache.sshd.client.future.ConnectFuture;
 import org.apache.sshd.client.future.OpenFuture;
 import org.apache.sshd.common.future.CloseFuture;
 import org.apache.sshd.common.future.SshFutureListener;
-import org.apache.sshd.common.io.IoInputStream;
-import org.apache.sshd.common.io.IoOutputStream;
-import org.apache.sshd.common.io.IoReadFuture;
-import org.apache.sshd.common.io.IoWriteFuture;
-import org.apache.sshd.common.io.WritePendingException;
-import org.apache.sshd.common.util.Buffer;
 import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -56,8 +47,8 @@ public class AsyncSshHandler extends ChannelOutboundHandlerAdapter {
     private final AuthenticationHandler authenticationHandler;
     private final SshClient sshClient;
 
-    private SshReadAsyncListener sshReadAsyncListener;
-    private SshWriteAsyncHandler sshWriteAsyncHandler;
+    private AsyncSshHanderReader sshReadAsyncListener;
+    private AsyncSshHandlerWriter sshWriteAsyncHandler;
 
     private ClientChannel channel;
     private ClientSession session;
@@ -147,10 +138,10 @@ public class AsyncSshHandler extends ChannelOutboundHandlerAdapter {
         connectPromise.setSuccess();
         connectPromise = null;
 
-        sshReadAsyncListener = new SshReadAsyncListener(this, ctx, channel.getAsyncOut());
+        sshReadAsyncListener = new AsyncSshHanderReader(this, ctx, channel.getAsyncOut());
         // if readAsyncListener receives immediate close, it will close this handler and closing this handler sets channel variable to null
         if(channel != null) {
-            sshWriteAsyncHandler = new SshWriteAsyncHandler(this, channel.getAsyncIn());
+            sshWriteAsyncHandler = new AsyncSshHandlerWriter(channel.getAsyncIn());
             ctx.fireChannelActive();
         }
     }
@@ -207,173 +198,4 @@ public class AsyncSshHandler extends ChannelOutboundHandlerAdapter {
         ctx.fireChannelInactive();
     }
 
-    /**
-     * Listener over async input stream from SSH session.
-     * This listeners schedules reads in a loop until the session is closed or read fails.
-     */
-    private static class SshReadAsyncListener implements SshFutureListener<IoReadFuture>, AutoCloseable {
-        private static final int BUFFER_SIZE = 8192;
-
-        private final ChannelOutboundHandler asyncSshHandler;
-        private final ChannelHandlerContext ctx;
-
-        private IoInputStream asyncOut;
-        private Buffer buf;
-        private IoReadFuture currentReadFuture;
-
-        public SshReadAsyncListener(final ChannelOutboundHandler asyncSshHandler, final ChannelHandlerContext ctx, final IoInputStream asyncOut) {
-            this.asyncSshHandler = asyncSshHandler;
-            this.ctx = ctx;
-            this.asyncOut = asyncOut;
-            buf = new Buffer(BUFFER_SIZE);
-            asyncOut.read(buf).addListener(this);
-        }
-
-        @Override
-        public synchronized void operationComplete(final IoReadFuture future) {
-            if(future.getException() != null) {
-                if(asyncOut.isClosed() || asyncOut.isClosing()) {
-                    // Ssh dropped
-                    logger.debug("Ssh session dropped on channel: {}", ctx.channel(), future.getException());
-                } else {
-                    logger.warn("Exception while reading from SSH remote on channel {}", ctx.channel(), future.getException());
-                }
-                invokeDisconnect();
-                return;
-            }
-
-            if (future.getRead() > 0) {
-                ctx.fireChannelRead(Unpooled.wrappedBuffer(buf.array(), 0, future.getRead()));
-
-                // Schedule next read
-                buf = new Buffer(BUFFER_SIZE);
-                currentReadFuture = asyncOut.read(buf);
-                currentReadFuture.addListener(this);
-            }
-        }
-
-        private void invokeDisconnect() {
-            try {
-                asyncSshHandler.disconnect(ctx, ctx.newPromise());
-            } catch (final Exception e) {
-                // This should not happen
-                throw new IllegalStateException(e);
-            }
-        }
-
-        @Override
-        public synchronized void close() {
-            // Remove self as listener on close to prevent reading from closed input
-            if(currentReadFuture != null) {
-                currentReadFuture.removeListener(this);
-            }
-
-            asyncOut = null;
-        }
-    }
-
-    private static final class SshWriteAsyncHandler implements AutoCloseable {
-        public static final int MAX_PENDING_WRITES = 100;
-
-        private final ChannelOutboundHandler channelHandler;
-        private IoOutputStream asyncIn;
-
-        // Counter that holds the amount of pending write messages
-        // Pending write can occur in case remote window is full
-        // In such case, we need to wait for the pending write to finish
-        private int pendingWriteCounter;
-        // Last write future, that can be pending
-        private IoWriteFuture lastWriteFuture;
-
-        public SshWriteAsyncHandler(final ChannelOutboundHandler channelHandler, final IoOutputStream asyncIn) {
-            this.channelHandler = channelHandler;
-            this.asyncIn = asyncIn;
-        }
-
-        int c = 0;
-
-        public synchronized void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) {
-            try {
-                if(asyncIn == null || asyncIn.isClosed() || asyncIn.isClosing()) {
-                    // If we are closed/closing, set immediate fail
-                    promise.setFailure(new IllegalStateException("Channel closed"));
-                } else {
-                    lastWriteFuture = asyncIn.write(toBuffer(msg));
-                    lastWriteFuture.addListener(new SshFutureListener<IoWriteFuture>() {
-
-                        @Override
-                        public void operationComplete(final IoWriteFuture future) {
-                            ((ByteBuf) msg).release();
-
-                            // Notify success or failure
-                            if (future.isWritten()) {
-                                promise.setSuccess();
-                            } else {
-                                promise.setFailure(future.getException());
-                            }
-
-                            // Reset last pending future
-                            synchronized (SshWriteAsyncHandler.this) {
-                                lastWriteFuture = null;
-                            }
-                        }
-                    });
-                }
-            } catch (final WritePendingException e) {
-                // Check limit for pending writes
-                pendingWriteCounter++;
-                if(pendingWriteCounter > MAX_PENDING_WRITES) {
-                    promise.setFailure(e);
-                    handlePendingFailed(ctx, new IllegalStateException("Too much pending writes(" + MAX_PENDING_WRITES + ") on channel: " + ctx.channel() +
-                            ", remote window is not getting read or is too small"));
-                }
-
-                logger.debug("Write pending to SSH remote on channel: {}, current pending count: {}", ctx.channel(), pendingWriteCounter);
-
-                // In case of pending, re-invoke write after pending is finished
-                Preconditions.checkNotNull(lastWriteFuture, "Write is pending, but there was no previous write attempt", e);
-                lastWriteFuture.addListener(new SshFutureListener<IoWriteFuture>() {
-                    @Override
-                    public void operationComplete(final IoWriteFuture future) {
-                        if (future.isWritten()) {
-                            synchronized (SshWriteAsyncHandler.this) {
-                                // Pending done, decrease counter
-                                pendingWriteCounter--;
-                            }
-                            write(ctx, msg, promise);
-                        } else {
-                            // Cannot reschedule pending, fail
-                            handlePendingFailed(ctx, e);
-                        }
-                    }
-
-                });
-            }
-        }
-
-        private void handlePendingFailed(final ChannelHandlerContext ctx, final Exception e) {
-            logger.warn("Exception while writing to SSH remote on channel {}", ctx.channel(), e);
-            try {
-                channelHandler.disconnect(ctx, ctx.newPromise());
-            } catch (final Exception ex) {
-                // This should not happen
-                throw new IllegalStateException(ex);
-            }
-        }
-
-        @Override
-        public void close() {
-            asyncIn = null;
-        }
-
-        private Buffer toBuffer(final Object msg) {
-            // TODO Buffer vs ByteBuf translate, Can we handle that better ?
-            Preconditions.checkState(msg instanceof ByteBuf);
-            final ByteBuf byteBuf = (ByteBuf) msg;
-            final byte[] temp = new byte[byteBuf.readableBytes()];
-            byteBuf.readBytes(temp, 0, byteBuf.readableBytes());
-            return new Buffer(temp);
-        }
-
-    }
 }
diff --git a/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandlerWriter.java b/opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandlerWriter.java
new file mode 100644 (file)
index 0000000..eace0ac
--- /dev/null
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import io.netty.buffer.ByteBuf;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
+import java.util.Deque;
+import java.util.LinkedList;
+import java.util.Queue;
+import org.apache.sshd.common.future.SshFutureListener;
+import org.apache.sshd.common.io.IoOutputStream;
+import org.apache.sshd.common.io.IoWriteFuture;
+import org.apache.sshd.common.io.WritePendingException;
+import org.apache.sshd.common.util.Buffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Async SSH writer. Takes messages (byte arrays) and sends them encrypted to the remote server.
+ * Also handles pending writes by queueing requests until the pending state is over.
+ */
+final class AsyncSshHandlerWriter implements AutoCloseable {
+
+    private static final Logger logger = LoggerFactory
+            .getLogger(AsyncSshHandlerWriter.class);
+
+    // public static final int MAX_PENDING_WRITES = 1000;
+    // TODO implement a limiting mechanism for pending writes.
+    // There are possible issues with limiting:
+    // 1. What to do when the queue is full? Fail every subsequent request immediately?
+    // 2. At this level we might be dealing with chunks of messages (not whole messages), and unexpected
+    //    behavior might occur if we send/queue one chunk and fail the others.
+
+    private IoOutputStream asyncIn;
+
+    // Order has to be preserved for queued writes
+    private final Deque<PendingWriteRequest> pending = new LinkedList<>();
+
+    public AsyncSshHandlerWriter(final IoOutputStream asyncIn) {
+        this.asyncIn = asyncIn;
+    }
+
+    public synchronized void write(final ChannelHandlerContext ctx,
+            final Object msg, final ChannelPromise promise) {
+        // TODO the isClosed/isClosing check might be performed by Mina SSHD internally and may not be required here
+        // If we are closed/closing, set immediate fail
+        if (asyncIn == null || asyncIn.isClosed() || asyncIn.isClosing()) {
+            promise.setFailure(new IllegalStateException("Channel closed"));
+        } else {
+            final ByteBuf byteBufMsg = (ByteBuf) msg;
+            if (!pending.isEmpty()) {
+                queueRequest(ctx, byteBufMsg, promise);
+                return;
+            }
+
+            writeWithPendingDetection(ctx, promise, byteBufMsg);
+        }
+    }
+
+    private void writeWithPendingDetection(final ChannelHandlerContext ctx, final ChannelPromise promise, final ByteBuf byteBufMsg) {
+        try {
+            if (logger.isTraceEnabled()) {
+                logger.trace("Writing request on channel: {}, message: {}", ctx.channel(), byteBufToString(byteBufMsg));
+            }
+            asyncIn.write(toBuffer(byteBufMsg)).addListener(new SshFutureListener<IoWriteFuture>() {
+                @Override
+                public void operationComplete(final IoWriteFuture future) {
+                    if (logger.isTraceEnabled()) {
+                        logger.trace("Ssh write request finished on channel: {} with result: {} and exception: {}, message: {}",
+                                ctx.channel(), future.isWritten(), future.getException(), byteBufToString(byteBufMsg));
+                    }
+
+                    // Notify success or failure
+                    if (future.isWritten()) {
+                        promise.setSuccess();
+                    } else {
+                        logger.warn("Ssh write request failed on channel: {} for message: {}", ctx.channel(), byteBufToString(byteBufMsg), future.getException());
+                        promise.setFailure(future.getException());
+                    }
+
+                    // Not needed anymore, release
+                    byteBufMsg.release();
+
+                    // Check the pending queue and schedule the next write.
+                    // At this point we are guaranteed not to be in the pending state anymore, so the next request should succeed.
+                    writePendingIfAny();
+                }
+            });
+        } catch (final WritePendingException e) {
+            queueRequest(ctx, byteBufMsg, promise);
+        }
+    }
+
+    private synchronized void writePendingIfAny() {
+        if (pending.peek() == null) {
+            return;
+        }
+
+        // In case of pending, reschedule next message from queue
+        final PendingWriteRequest pendingWrite = pending.poll();
+        final ByteBuf msg = pendingWrite.msg;
+        if (logger.isTraceEnabled()) {
+            logger.trace("Writing pending request on channel: {}, message: {}", pendingWrite.ctx.channel(), byteBufToString(msg));
+        }
+
+        writeWithPendingDetection(pendingWrite.ctx, pendingWrite.promise, msg);
+    }
+
+    private static String byteBufToString(final ByteBuf msg) {
+        msg.resetReaderIndex();
+        final String s = msg.toString(Charsets.UTF_8);
+        msg.resetReaderIndex();
+        return s;
+    }
+
+    private void queueRequest(final ChannelHandlerContext ctx, final ByteBuf msg, final ChannelPromise promise) {
+        logger.debug("Write pending on channel: {}, queueing, current queue size: {}", ctx.channel(), pending.size());
+        if (logger.isTraceEnabled()) {
+            logger.trace("Queueing request due to pending: {}", byteBufToString(msg));
+        }
+        new PendingWriteRequest(ctx, msg, promise).pend(pending);
+    }
+
+    @Override
+    public synchronized void close() {
+        asyncIn = null;
+    }
+
+    private Buffer toBuffer(final ByteBuf msg) {
+        // TODO Buffer vs ByteBuf translation. Can we handle this better?
+        final byte[] temp = new byte[msg.readableBytes()];
+        msg.readBytes(temp, 0, msg.readableBytes());
+        return new Buffer(temp);
+    }
+
+    private static final class PendingWriteRequest {
+        private final ChannelHandlerContext ctx;
+        private final ByteBuf msg;
+        private final ChannelPromise promise;
+
+        public PendingWriteRequest(final ChannelHandlerContext ctx, final ByteBuf msg, final ChannelPromise promise) {
+            this.ctx = ctx;
+            // Reset the reader index; the last (failed) write attempt moved it to the end
+            msg.resetReaderIndex();
+            this.msg = msg;
+            this.promise = promise;
+        }
+
+        public void pend(final Queue<PendingWriteRequest> pending) {
+            // Preconditions.checkState(pending.size() < MAX_PENDING_WRITES,
+            // "Too much pending writes(%s) on channel: %s, remote window is not getting read or is too small",
+            // pending.size(), ctx.channel());
+            Preconditions.checkState(pending.offer(this), "Cannot queue another write request (pending count: %s) on channel: %s",
+                    pending.size(), ctx.channel());
+        }
+    }
+}
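The new writer boils down to: attempt the write, and if Mina SSHD signals WritePendingException, park the request in a FIFO queue that is drained from the completion listener of the in-flight write. A minimal, self-contained sketch of that pattern, with hypothetical AsyncStream/QueuedWriter names standing in for the Mina types:

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.concurrent.CompletableFuture;

    // Hypothetical stand-in for an async stream that rejects concurrent writes.
    interface AsyncStream {
        // Completes when the write is flushed; throws IllegalStateException
        // if another write is already in flight.
        CompletableFuture<Void> write(byte[] data);
    }

    final class QueuedWriter {
        private final AsyncStream stream;
        // Order has to be preserved for queued writes.
        private final Deque<byte[]> pending = new ArrayDeque<>();

        QueuedWriter(final AsyncStream stream) {
            this.stream = stream;
        }

        synchronized void write(final byte[] msg) {
            // Anything already queued must be flushed first to keep ordering.
            if (!pending.isEmpty()) {
                pending.add(msg);
                return;
            }
            writeWithPendingDetection(msg);
        }

        private void writeWithPendingDetection(final byte[] msg) {
            try {
                // On completion, drain the next queued request, if any.
                stream.write(msg).whenComplete((ok, ex) -> writePendingIfAny());
            } catch (final IllegalStateException e) {
                // Write already pending: queue and retry from the listener.
                pending.add(msg);
            }
        }

        private synchronized void writePendingIfAny() {
            final byte[] next = pending.poll();
            if (next != null) {
                writeWithPendingDetection(next);
            }
        }
    }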
index 223f2c7f94469dcfcadd1908bb803105356522e9..d0fc43d04aa2ce656f7616072522437009e92342 100644 (file)
@@ -23,12 +23,9 @@ import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyNoMoreInteractions;
 import static org.mockito.Mockito.verifyZeroInteractions;
 
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.Unpooled;
 import java.io.IOException;
 import java.net.SocketAddress;
 
-import java.nio.channels.WritePendingException;
 import org.apache.sshd.ClientChannel;
 import org.apache.sshd.ClientSession;
 import org.apache.sshd.SshClient;
@@ -46,6 +43,7 @@ import org.apache.sshd.common.io.IoWriteFuture;
 import org.apache.sshd.common.util.Buffer;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.mockito.Matchers;
 import org.mockito.Mock;
@@ -59,6 +57,8 @@ import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
 import io.netty.channel.Channel;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.channel.ChannelPromise;
@@ -351,19 +351,16 @@ public class AsyncSshHandlerTest {
 
         // make first write stop pending
         firstWriteListener.operationComplete(ioWriteFuture);
-        // intercept third listener, this is regular listener for second write to determine success or failure
-        final ListenableFuture<SshFutureListener<IoWriteFuture>> afterPendingListener = stubAddListener(ioWriteFuture);
 
         // notify listener for second write that pending has ended
         pendingListener.get().operationComplete(ioWriteFuture);
-        // Notify third listener (regular listener for second write) that second write succeeded
-        afterPendingListener.get().operationComplete(ioWriteFuture);
 
         // verify both write promises successful
         verify(firstWritePromise).setSuccess();
         verify(secondWritePromise).setSuccess();
     }
 
+    @Ignore("Pending queue is not limited")
     @Test
     public void testWritePendingMax() throws Exception {
         asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
@@ -389,11 +386,11 @@ public class AsyncSshHandlerTest {
         final ChannelPromise secondWritePromise = getMockedPromise();
         // now make write throw pending exception
         doThrow(org.apache.sshd.common.io.WritePendingException.class).when(asyncIn).write(any(Buffer.class));
-        for (int i = 0; i < 1000; i++) {
+        for (int i = 0; i < 1001; i++) {
             asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5}), secondWritePromise);
         }
 
-        verify(ctx).fireChannelInactive();
+        verify(secondWritePromise, times(1)).setFailure(any(Throwable.class));
     }
 
     @Test
index c686bcbc66fcb849da3f357bdd3bea2df7f62f2e..0d0f95c3cb4d0856ba56c470f0890eb51371caa1 100644 (file)
@@ -123,6 +123,7 @@ public class NetconfSSHActivator implements BundleActivator {
             final AuthProvider authService = bundleContext.getService(reference);
             final Integer newServicePreference = getPreference(reference);
             if(isBetter(newServicePreference)) {
+                maxPreference = newServicePreference;
                 server.setAuthProvider(authService);
                 if(sshThread == null) {
                     sshThread = runNetconfSshThread(server);
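The added assignment is the actual fix here: assuming isBetter() compares an incoming provider's preference against the highest value seen so far, that maximum must be updated whenever a better provider wins, or a later, lower-preference provider could displace it. A minimal sketch of the invariant, with hypothetical names:

    // Hypothetical sketch of the preference invariant: remember the highest
    // preference seen so far and only replace the active provider when a
    // newcomer beats it.
    final class PreferredProviderTracker<T> {
        private Integer maxPreference;
        private T active;

        private boolean isBetter(final Integer candidate) {
            return maxPreference == null || candidate > maxPreference;
        }

        synchronized void offer(final Integer preference, final T provider) {
            if (isBetter(preference)) {
                // Without this assignment, isBetter() keeps comparing
                // against a stale maximum.
                maxPreference = preference;
                active = provider;
            }
        }

        synchronized T getActive() {
            return active;
        }
    }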
index d314c31b4fcd6288821109046e59cf1b20fd7ef8..da5e2090ff4cfb70d0c253ad339154dccd9ac107 100644 (file)
@@ -71,7 +71,7 @@ public class AuthProviderImpl implements AuthProvider {
     }
 
     @VisibleForTesting
-    void setNullableUserManager(final IUserManager nullableUserManager) {
+    synchronized void setNullableUserManager(final IUserManager nullableUserManager) {
         this.nullableUserManager = nullableUserManager;
     }
 }
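Adding synchronized to the setter only helps if every access to nullableUserManager goes through the same monitor; a minimal sketch of the pattern, with a hypothetical Holder class:

    // Hypothetical sketch: a mutable field is only safely published when
    // reads and writes share one monitor, which is presumably why the
    // @VisibleForTesting setter was made synchronized to match the other
    // synchronized accessors.
    final class Holder {
        private String value;

        synchronized void set(final String v) {
            value = v;
        }

        synchronized String get() {
            return value;
        }
    }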
index 25e0f7926574c3168bc013cff865ec2f6a8a33db..b6f5854aa331140cf21c1de433c5f4a9263e6201 100644 (file)
@@ -116,8 +116,8 @@ public abstract class AbstractNetconfOperation implements NetconfOperation {
             rpcReply.appendChild(responseNS);
         }
 
-        for (String attrName : attributes.keySet()) {
-            rpcReply.setAttributeNode((Attr) document.importNode(attributes.get(attrName), true));
+        for (Attr attribute : attributes.values()) {
+            rpcReply.setAttributeNode((Attr) document.importNode(attribute, true));
         }
         document.appendChild(rpcReply);
         return document;
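The cleanup above replaces a keySet() loop with per-key get() calls by iterating values() directly, which is what the loop body actually needs. A micro-example of the difference:

    import java.util.LinkedHashMap;
    import java.util.Map;

    final class ValuesIteration {
        public static void main(final String[] args) {
            final Map<String, String> attributes = new LinkedHashMap<>();
            attributes.put("xmlns", "urn:example");

            // Before: one extra lookup per entry.
            for (final String name : attributes.keySet()) {
                System.out.println(attributes.get(name));
            }
            // After: iterate the values directly.
            for (final String value : attributes.values()) {
                System.out.println(value);
            }
        }
    }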
index 3e8040ad8a1d87e916415795e08e38814963537b..c532b7f9a6f235a0eebc7dc4a53bc10f2208dbc4 100644 (file)
@@ -10,7 +10,9 @@ package org.opendaylight.controller.netconf.util.messages;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Collections2;
-
+import java.util.Collection;
+import java.util.List;
+import javax.annotation.Nonnull;
 import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
 import org.opendaylight.controller.netconf.api.NetconfMessage;
 import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
@@ -19,11 +21,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 
-import javax.annotation.Nullable;
-
-import java.util.Collection;
-import java.util.List;
-
 public final class NetconfMessageUtil {
 
     private static final Logger logger = LoggerFactory.getLogger(NetconfMessageUtil.class);
@@ -67,9 +64,8 @@ public final class NetconfMessageUtil {
         List<XmlElement> caps = capabilitiesElement.getChildElements(XmlNetconfConstants.CAPABILITY);
         return Collections2.transform(caps, new Function<XmlElement, String>() {
 
-            @Nullable
             @Override
-            public String apply(@Nullable XmlElement input) {
+            public String apply(@Nonnull XmlElement input) {
                 // Trim possible leading/tailing whitespace
                 try {
                     return input.getTextContent().trim();
index 333fea3493172286fdba2c807eff105760741411..c77e0d7da25dc112e54489fbae42bf317a3f2b3f 100644 (file)
@@ -53,29 +53,6 @@ public final class NetconfConfigUtil {
         }
     }
 
-    /**
-     * Get extracted address or default.
-     *
-     * @throws java.lang.IllegalStateException if neither address is present.
-     */
-    private static InetSocketAddress getNetconfAddress(final InetSocketAddress defaultAddress, Optional<InetSocketAddress> extractedAddress, InfixProp infix) {
-        InetSocketAddress inetSocketAddress;
-
-        if (extractedAddress.isPresent() == false) {
-            logger.debug("Netconf {} address not found, falling back to default {}", infix, defaultAddress);
-
-            if (defaultAddress == null) {
-                logger.warn("Netconf {} address not found, default address not provided", infix);
-                throw new IllegalStateException("Netconf " + infix + " address not found, default address not provided");
-            }
-            inetSocketAddress = defaultAddress;
-        } else {
-            inetSocketAddress = extractedAddress.get();
-        }
-
-        return inetSocketAddress;
-    }
-
     public static String getPrivateKeyPath(final BundleContext context) {
         return getPropertyValue(context, getPrivateKeyKey());
     }
index 4e3a66b7ec5f7edfe0399bcab74e6a1843ed0449..78efe7e9723154bd218bafa5d248af0329fb567d 100644 (file)
@@ -20,7 +20,6 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import javax.annotation.Nullable;
 import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
 import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
 import org.opendaylight.controller.netconf.util.exception.UnexpectedElementException;
@@ -204,7 +203,7 @@ public final class XmlElement {
         return Lists.newArrayList(Collections2.filter(getChildElementsWithinNamespace(namespace),
                 new Predicate<XmlElement>() {
                     @Override
-                    public boolean apply(@Nullable XmlElement xmlElement) {
+                    public boolean apply(XmlElement xmlElement) {
                         return xmlElement.getName().equals(childName);
                     }
                 }));
@@ -298,7 +297,7 @@ public final class XmlElement {
         List<XmlElement> children = getChildElementsWithinNamespace(namespace);
         children = Lists.newArrayList(Collections2.filter(children, new Predicate<XmlElement>() {
             @Override
-            public boolean apply(@Nullable XmlElement xmlElement) {
+            public boolean apply(XmlElement xmlElement) {
                 return xmlElement.getName().equals(childName);
             }
         }));
@@ -436,7 +435,7 @@ public final class XmlElement {
         List<XmlElement> children = getChildElementsWithinNamespace(getNamespace());
         return Lists.newArrayList(Collections2.filter(children, new Predicate<XmlElement>() {
             @Override
-            public boolean apply(@Nullable XmlElement xmlElement) {
+            public boolean apply(XmlElement xmlElement) {
                 return xmlElement.getName().equals(childName);
             }
         }));
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractLastNetconfOperationTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractLastNetconfOperationTest.java
new file mode 100644 (file)
index 0000000..62633dd
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+public class AbstractLastNetconfOperationTest {
+    class LastNetconfOperationImplTest extends AbstractLastNetconfOperation {
+
+        boolean handleWithNoSubsequentOperationsRun;
+
+        protected LastNetconfOperationImplTest(String netconfSessionIdForReporting) {
+            super(netconfSessionIdForReporting);
+            handleWithNoSubsequentOperationsRun = false;
+        }
+
+        @Override
+        protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+            handleWithNoSubsequentOperationsRun = true;
+            return null;
+        }
+
+        @Override
+        protected String getOperationName() {
+            return "";
+        }
+    }
+
+    LastNetconfOperationImplTest netconfOperation;
+
+    @Before
+    public void setUp() throws Exception {
+        netconfOperation = new LastNetconfOperationImplTest("");
+    }
+
+    @Test
+    public void testNetconfOperation() throws Exception {
+        netconfOperation.handleWithNoSubsequentOperations(null, null);
+        assertTrue(netconfOperation.handleWithNoSubsequentOperationsRun);
+        assertEquals(HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY, netconfOperation.getHandlingPriority());
+    }
+
+    @Test(expected = NetconfDocumentedException.class)
+    public void testHandle() throws Exception {
+        NetconfOperationChainedExecution operation = mock(NetconfOperationChainedExecution.class);
+        doReturn("").when(operation).toString();
+
+        doReturn(false).when(operation).isExecutionTermination();
+        netconfOperation.handle(null, null, operation);
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractNetconfOperationTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractNetconfOperationTest.java
new file mode 100644 (file)
index 0000000..ea4a6e6
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import java.io.IOException;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.xml.sax.SAXException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
+public class AbstractNetconfOperationTest {
+
+    class NetconfOperationImpl extends AbstractNetconfOperation {
+
+        public boolean handleRun;
+
+        protected NetconfOperationImpl(String netconfSessionIdForReporting) {
+            super(netconfSessionIdForReporting);
+            this.handleRun = false;
+        }
+
+        @Override
+        protected String getOperationName() {
+            return null;
+        }
+
+        @Override
+        protected Element handle(Document document, XmlElement message, NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+            this.handleRun = true;
+            try {
+                return XmlUtil.readXmlToElement("<element/>");
+            } catch (SAXException | IOException e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+    private NetconfOperationImpl netconfOperation;
+    private NetconfOperationChainedExecution operation;
+
+    @Before
+    public void setUp() throws Exception {
+        netconfOperation = new NetconfOperationImpl("str");
+        operation = mock(NetconfOperationChainedExecution.class);
+    }
+
+    @Test
+    public void testAbstractNetconfOperation() throws Exception {
+        Document helloMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/edit_config.xml");
+        assertEquals(netconfOperation.getNetconfSessionIdForReporting(), "str");
+        assertNotNull(netconfOperation.canHandle(helloMessage));
+        assertEquals(netconfOperation.getHandlingPriority(), HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY);
+
+        netconfOperation.handle(helloMessage, operation);
+        assertTrue(netconfOperation.handleRun);
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractSingletonNetconfOperationTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractSingletonNetconfOperationTest.java
new file mode 100644 (file)
index 0000000..d1310de
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import static org.junit.Assert.assertEquals;
+
+public class AbstractSingletonNetconfOperationTest {
+    class SingletonNCOperationImpl extends AbstractSingletonNetconfOperation {
+
+        protected SingletonNCOperationImpl(String netconfSessionIdForReporting) {
+            super(netconfSessionIdForReporting);
+        }
+
+        @Override
+        protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+            return null;
+        }
+
+        @Override
+        protected String getOperationName() {
+            return null;
+        }
+    }
+
+    @Test
+    public void testAbstractSingletonNetconfOperation() throws Exception {
+        SingletonNCOperationImpl operation = new SingletonNCOperationImpl("");
+        assertEquals(operation.getHandlingPriority(), HandlingPriority.HANDLE_WITH_MAX_PRIORITY);
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessageAdditionalHeaderTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessageAdditionalHeaderTest.java
new file mode 100644 (file)
index 0000000..95c9124
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class NetconfHelloMessageAdditionalHeaderTest {
+
+    private String customHeader = "[user;1.1.1.1:40;tcp;client;]";
+    private NetconfHelloMessageAdditionalHeader header;
+
+    @Before
+    public void setUp() throws Exception {
+        header = new NetconfHelloMessageAdditionalHeader("user", "1.1.1.1", "40", "tcp", "client");
+    }
+
+    @Test
+    public void testGetters() throws Exception {
+        assertEquals(header.getAddress(), "1.1.1.1");
+        assertEquals(header.getUserName(), "user");
+        assertEquals(header.getPort(), "40");
+        assertEquals(header.getTransport(), "tcp");
+        assertEquals(header.getSessionIdentifier(), "client");
+    }
+
+    @Test
+    public void testStaticConstructor() throws Exception {
+        NetconfHelloMessageAdditionalHeader h = NetconfHelloMessageAdditionalHeader.fromString(customHeader);
+        assertEquals(h.toString(), header.toString());
+        assertEquals(h.toFormattedString(), header.toFormattedString());
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessageTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessageTest.java
new file mode 100644 (file)
index 0000000..c39ac8e
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import com.google.common.base.Optional;
+import java.util.Set;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.internal.util.collections.Sets;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class NetconfHelloMessageTest {
+
+    Set<String> caps;
+
+    @Before
+    public void setUp() throws Exception {
+        caps = Sets.newSet("cap1");
+    }
+
+    @Test
+    public void testConstructor() throws Exception {
+        NetconfHelloMessageAdditionalHeader additionalHeader = new NetconfHelloMessageAdditionalHeader("name","host","1","transp","id");
+        NetconfHelloMessage message = NetconfHelloMessage.createClientHello(caps, Optional.of(additionalHeader));
+        assertTrue(message.isHelloMessage(message));
+        assertEquals(Optional.of(additionalHeader), message.getAdditionalHeader());
+
+        NetconfHelloMessage serverMessage = NetconfHelloMessage.createServerHello(caps, 100L);
+        assertTrue(serverMessage.isHelloMessage(serverMessage));
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageHeaderTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageHeaderTest.java
new file mode 100644 (file)
index 0000000..cca89ae
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import com.google.common.base.Charsets;
+import java.util.Arrays;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+public class NetconfMessageHeaderTest {
+    @Test
+    public void testGet() throws Exception {
+        NetconfMessageHeader header = new NetconfMessageHeader(10);
+        assertEquals(header.getLength(), 10);
+
+        byte[] expectedValue = "\n#10\n".getBytes(Charsets.US_ASCII);
+        assertArrayEquals(expectedValue, header.toBytes());
+    }
+}
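The expected bytes come from NETCONF 1.1 chunked framing (RFC 6242), where each chunk is introduced by "\n#<length>\n". A minimal sketch of that encoding, assuming it matches what NetconfMessageHeader.toBytes() produces:

    import java.nio.charset.StandardCharsets;

    final class ChunkHeader {
        // NETCONF 1.1 chunk header: "\n#<length>\n" in US-ASCII.
        static byte[] toBytes(final long length) {
            return ("\n#" + length + "\n").getBytes(StandardCharsets.US_ASCII);
        }

        public static void main(final String[] args) {
            // Prints: 0a 23 31 30 0a
            for (final byte b : toBytes(10)) {
                System.out.printf("%02x ", b);
            }
        }
    }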
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageUtilTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageUtilTest.java
new file mode 100644 (file)
index 0000000..2af34e9
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import java.util.Collection;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.w3c.dom.Document;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class NetconfMessageUtilTest {
+    @Test
+    public void testNetconfMessageUtil() throws Exception {
+        Document okMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc-reply_ok.xml");
+        assertTrue(NetconfMessageUtil.isOKMessage(new NetconfMessage(okMessage)));
+        assertFalse(NetconfMessageUtil.isErrorMessage(new NetconfMessage(okMessage)));
+
+        Document errorMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/communicationError/testClientSendsRpcReply_expectedResponse.xml");
+        assertTrue(NetconfMessageUtil.isErrorMessage(new NetconfMessage(errorMessage)));
+        assertFalse(NetconfMessageUtil.isOKMessage(new NetconfMessage(errorMessage)));
+
+        Document helloMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/client_hello.xml");
+        Collection<String> caps = NetconfMessageUtil.extractCapabilitiesFromHello(new NetconfMessage(helloMessage).getDocument());
+        assertTrue(caps.contains("urn:ietf:params:netconf:base:1.0"));
+        assertTrue(caps.contains("urn:ietf:params:netconf:base:1.1"));
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/SendErrorExceptionUtilTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/SendErrorExceptionUtilTest.java
new file mode 100644 (file)
index 0000000..c8d562c
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.util.concurrent.GenericFutureListener;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.api.NetconfSession;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.w3c.dom.Document;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+public class SendErrorExceptionUtilTest {
+
+    NetconfSession netconfSession;
+    ChannelFuture channelFuture;
+    Channel channel;
+    private NetconfDocumentedException exception;
+
+    @Before
+    public void setUp() throws Exception {
+        netconfSession = mock(NetconfSession.class);
+        channelFuture = mock(ChannelFuture.class);
+        channel = mock(Channel.class);
+        doReturn(channelFuture).when(netconfSession).sendMessage(any(NetconfMessage.class));
+        doReturn(channelFuture).when(channelFuture).addListener(any(GenericFutureListener.class));
+        doReturn(channelFuture).when(channel).writeAndFlush(any(NetconfMessage.class));
+        exception = new NetconfDocumentedException("err");
+    }
+
+    @Test
+    public void testSendErrorMessage1() throws Exception {
+        SendErrorExceptionUtil.sendErrorMessage(netconfSession, exception);
+        verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+        verify(netconfSession, times(1)).sendMessage(any(NetconfMessage.class));
+    }
+
+    @Test
+    public void testSendErrorMessage2() throws Exception {
+        SendErrorExceptionUtil.sendErrorMessage(channel, exception);
+        verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+    }
+
+    @Test
+    public void testSendErrorMessage3() throws Exception {
+        Document helloMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc.xml");
+        SendErrorExceptionUtil.sendErrorMessage(netconfSession, exception, new NetconfMessage(helloMessage));
+        verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/osgi/NetconfConfigUtilTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/osgi/NetconfConfigUtilTest.java
new file mode 100644 (file)
index 0000000..741d0d2
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.osgi;
+
+import com.google.common.base.Optional;
+import io.netty.channel.local.LocalAddress;
+import java.net.InetSocketAddress;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.util.NetconfUtil;
+import org.osgi.framework.BundleContext;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+public class NetconfConfigUtilTest {
+
+    private BundleContext bundleContext;
+
+    @Before
+    public void setUp() throws Exception {
+        bundleContext = mock(BundleContext.class);
+    }
+
+    @Test
+    public void testNetconfConfigUtil() throws Exception {
+        assertEquals(NetconfConfigUtil.getNetconfLocalAddress(), new LocalAddress("netconf"));
+
+        doReturn("").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
+        assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+
+        doReturn("a").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
+        assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+    }
+
+    @Test
+    public void testgetPrivateKeyKey() throws Exception {
+        assertEquals(NetconfConfigUtil.getPrivateKeyKey(), "netconf.ssh.pk.path");
+    }
+
+    @Test
+    public void testgetNetconfServerAddressKey() throws Exception {
+        NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+        assertEquals(NetconfConfigUtil.getNetconfServerAddressKey(prop), "netconf.tcp.address");
+    }
+
+    @Test
+    public void testExtractNetconfServerAddress() throws Exception {
+        NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+        doReturn("").when(bundleContext).getProperty(anyString());
+        assertEquals(NetconfConfigUtil.extractNetconfServerAddress(bundleContext, prop), Optional.absent());
+    }
+
+    @Test
+    public void testExtractNetconfServerAddress2() throws Exception {
+        NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+        doReturn("1.1.1.1").when(bundleContext).getProperty("netconf.tcp.address");
+        doReturn("20").when(bundleContext).getProperty("netconf.tcp.port");
+        Optional<InetSocketAddress> inetSocketAddressOptional = NetconfConfigUtil.extractNetconfServerAddress(bundleContext, prop);
+        assertTrue(inetSocketAddressOptional.isPresent());
+        assertEquals(inetSocketAddressOptional.get(), new InetSocketAddress("1.1.1.1", 20));
+    }
+
+    @Test
+    public void testGetPrivateKeyPath() throws Exception {
+        doReturn("path").when(bundleContext).getProperty("netconf.ssh.pk.path");
+        assertEquals(NetconfConfigUtil.getPrivateKeyPath(bundleContext), "path");
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void testGetPrivateKeyPath2() throws Exception {
+        doReturn(null).when(bundleContext).getProperty("netconf.ssh.pk.path");
+        assertEquals(NetconfConfigUtil.getPrivateKeyPath(bundleContext), "path");
+    }
+}
index 728316afabf940ed9ded1eca75ce47fe743e2d1b..19bcfe44ec5b18835b328a3f291b15eecb192c06 100644 (file)
@@ -66,6 +66,7 @@
                             org.eclipse.persistence.jaxb.rs,
                             com.sun.jersey.spi.container.servlet,
                             javax.ws.rs,
+                            javax.ws.rs.ext,
                             javax.ws.rs.core,
                             javax.xml.bind.annotation,
                             javax.xml.bind,
index 35ae71d0019ed2b0b1d4893c7e47901be6e906ea..987394402d7157f44a011c3d16ab77308855d8a6 100644 (file)
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others.  All rights reserved.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
@@ -190,8 +190,9 @@ public class ICMP extends Packet {
             end += rawPayload.length;
         }
         int checksumStartByte = start + getfieldOffset(CHECKSUM) / NetUtils.NumBitsInAByte;
+        int even = end & ~1;
 
-        for (int i = start; i <= (end - 1); i = i + 2) {
+        for (int i = start; i < even; i = i + 2) {
             // Skip, if the current bytes are checkSum bytes
             if (i == checksumStartByte) {
                 continue;
@@ -199,7 +200,13 @@ public class ICMP extends Packet {
             wordData = ((data[i] << 8) & 0xFF00) + (data[i + 1] & 0xFF);
             sum = sum + wordData;
         }
-        carry = (sum >> 16) & 0xFF;
+        if (even < end) {
+            // Add the last octet with zero padding.
+            wordData = (data[even] << 8) & 0xFF00;
+            sum = sum + wordData;
+        }
+
+        carry = sum >>> 16;
         finalSum = (sum & 0xFFFF) + carry;
         return (short) ~((short) finalSum & 0xFFFF);
     }
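The fix brings the loop in line with the standard Internet (one's-complement) checksum: sum 16-bit big-endian words, zero-pad a trailing odd octet, fold the carry out of the top 16 bits, then invert. A self-contained sketch of the same computation (the real method additionally skips the checksum field itself), with the carry fold written as a loop so it also covers the rare case where the first fold itself produces a carry:

    final class InternetChecksum {
        static short compute(final byte[] data, final int start, final int end) {
            int sum = 0;
            final int even = end & ~1;
            for (int i = start; i < even; i += 2) {
                sum += ((data[i] << 8) & 0xFF00) + (data[i + 1] & 0xFF);
            }
            if (even < end) {
                // Zero-pad the last octet of an odd-length range.
                sum += (data[even] << 8) & 0xFF00;
            }
            while ((sum >>> 16) != 0) {
                // Fold carries back into the low 16 bits.
                sum = (sum & 0xFFFF) + (sum >>> 16);
            }
            return (short) ~sum;
        }
    }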
index 3363f423d695f1b2f090e6c5274715f47d765e3c..56793c41f6ef624efedd743b9682bb13bede8018 100644 (file)
@@ -260,7 +260,17 @@ public class IPv4 extends Packet {
      */
     public void setHeaderField(String headerField, byte[] readValue) {
         if (headerField.equals(PROTOCOL)) {
-            payloadClass = protocolClassMap.get(readValue[0]);
+            // Don't set payloadClass if the fragment offset is not zero.
+            byte[] fragoff = hdrFieldsMap.get(FRAGOFFSET);
+            if (fragoff == null || BitBufferHelper.getShort(fragoff) == 0) {
+                payloadClass = protocolClassMap.get(readValue[0]);
+            }
+        } else if (headerField.equals(FRAGOFFSET)) {
+            if (readValue != null && BitBufferHelper.getShort(readValue) != 0) {
+                // Clear payloadClass because protocol header is not present
+                // in this packet.
+                payloadClass = null;
+            }
         } else if (headerField.equals(OPTIONS) &&
                    (readValue == null || readValue.length == 0)) {
             hdrFieldsMap.remove(headerField);
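The rule being enforced: only the first fragment (fragment offset zero) carries the transport-layer header, so only there may the payload be deserialized into a protocol class; later fragments must stay raw. A minimal sketch with hypothetical names:

    import java.util.Map;

    final class FragmentRule {
        // Non-first fragments start mid-stream, so there is no protocol
        // header to deserialize; keep the payload raw by returning null.
        static Class<?> payloadClassFor(final short fragmentOffset, final byte protocol,
                                        final Map<Byte, Class<?>> protocolClassMap) {
            return fragmentOffset == 0 ? protocolClassMap.get(protocol) : null;
        }
    }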
index e81fbf02cfafc4550ebfb5e5558d144e55d0696d..287b73ae3c22fda416fe21a8f003bc9d6e451312 100644 (file)
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others.  All rights reserved.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
@@ -9,6 +9,8 @@
 
 package org.opendaylight.controller.sal.packet;
 
+import java.util.Arrays;
+
 import junit.framework.Assert;
 
 import org.junit.Test;
@@ -74,28 +76,58 @@ public class ICMPTest {
                 (byte) 0x2b, (byte) 0x2c, (byte) 0x2d, (byte) 0x2e,
                 (byte) 0x2f, (byte) 0x30, (byte) 0x31, (byte) 0x32,
                 (byte) 0x33, (byte) 0x34, (byte) 0x35, (byte) 0x36, (byte) 0x37 };
+        serializeTest(icmpRawPayload, (short)0xe553);
+
+        serializeTest(null, (short)0xb108);
+        serializeTest(new byte[0], (short)0xb108);
+
+        byte[] odd = {
+            (byte)0xba, (byte)0xd4, (byte)0xc7, (byte)0x53,
+            (byte)0xf8, (byte)0x59, (byte)0x68, (byte)0x77,
+            (byte)0xfd, (byte)0x27, (byte)0xe0, (byte)0x5b,
+            (byte)0xd0, (byte)0x2e, (byte)0x28, (byte)0x41,
+            (byte)0xa3, (byte)0x48, (byte)0x5d, (byte)0x2e,
+            (byte)0x7d, (byte)0x5b, (byte)0xd3, (byte)0x60,
+            (byte)0xb3, (byte)0x88, (byte)0x8d, (byte)0x0f,
+            (byte)0x1d, (byte)0x87, (byte)0x51, (byte)0x0f,
+            (byte)0x6a, (byte)0xff, (byte)0xf7, (byte)0xd4,
+            (byte)0x40, (byte)0x35, (byte)0x4e, (byte)0x01,
+            (byte)0x36,
+        };
+        serializeTest(odd, (short)0xd0ad);
+
+        // Large payload that causes 16-bit checksum overflow more than
+        // 255 times.
+        byte[] largeEven = new byte[1024];
+        Arrays.fill(largeEven, (byte)0xff);
+        serializeTest(largeEven, (short)0xb108);
+
+        byte[] largeOdd = new byte[1021];
+        Arrays.fill(largeOdd, (byte)0xff);
+        serializeTest(largeOdd, (short)0xb207);
+    }
 
-        short checksum = (short)0xe553;
-
-        // Create ICMP object
+    private void serializeTest(byte[] payload, short checksum)
+        throws PacketException {
         ICMP icmp = new ICMP();
-        icmp.setType((byte)8);
-        icmp.setCode((byte)0);
-        icmp.setIdentifier((short) 0x46f5);
-        icmp.setSequenceNumber((short) 2);
-        icmp.setRawPayload(icmpRawPayload);
-        //icmp.setChecksum(checksum);
+        icmp.setType((byte)8).setCode((byte)0).
+            setIdentifier((short)0x46f5).setSequenceNumber((short)2);
+        int payloadSize = 0;
+        if (payload != null) {
+            icmp.setRawPayload(payload);
+            payloadSize = payload.length;
+        }
 
         // Serialize
-        byte[] stream = icmp.serialize();
-        Assert.assertTrue(stream.length == 64);
+        byte[] data = icmp.serialize();
+        Assert.assertEquals(payloadSize + 8, data.length);
 
         // Deserialize
         ICMP icmpDes = new ICMP();
-        icmpDes.deserialize(stream, 0, stream.length);
+        icmpDes.deserialize(data, 0, data.length);
 
         Assert.assertFalse(icmpDes.isCorrupted());
-        Assert.assertTrue(icmpDes.getChecksum() == checksum);
-        Assert.assertTrue(icmp.equals(icmpDes));
+        Assert.assertEquals(checksum, icmpDes.getChecksum());
+        Assert.assertEquals(icmp, icmpDes);
     }
 }
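The empty-payload expectation 0xb108 can be verified by hand: with the checksum field itself skipped, the ICMP header words are 0x0800 (type 8, code 0), 0x46f5 (identifier) and 0x0002 (sequence number), so the sum is 0x0800 + 0x46f5 + 0x0002 = 0x4ef7 with no carry, and its one's complement is ~0x4ef7 = 0xb108.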
index f5298711b677a64ef9c80863580743ce2c394e52..b98342831cdf9256a01ae98698ae91babf0c2e97 100644 (file)
@@ -12,9 +12,9 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.Arrays;
 
-import junit.framework.Assert;
-
+import org.junit.Assert;
 import org.junit.Test;
+
 import org.opendaylight.controller.sal.match.Match;
 import org.opendaylight.controller.sal.match.MatchType;
 import org.opendaylight.controller.sal.utils.EtherTypes;
@@ -481,4 +481,200 @@ public class IPv4Test {
         Assert.assertEquals(protocol, (byte) match.getField(MatchType.NW_PROTO).getValue());
         Assert.assertEquals(tos, (byte) match.getField(MatchType.NW_TOS).getValue());
     }
+
+    @Test
+    public void testFragment() throws Exception {
+        byte[] payload1 = new byte[0];
+        byte[] payload2 = {
+            (byte)0x61, (byte)0xd1, (byte)0x3d, (byte)0x51,
+            (byte)0x1b, (byte)0x75, (byte)0xa7, (byte)0x83,
+        };
+        byte[] payload3 = {
+            (byte)0xe7, (byte)0x0f, (byte)0x2d, (byte)0x7e,
+            (byte)0x15, (byte)0xba, (byte)0xe7, (byte)0x6d,
+            (byte)0xb5, (byte)0xc5, (byte)0xb5, (byte)0x37,
+            (byte)0x59, (byte)0xbc, (byte)0x91, (byte)0x43,
+            (byte)0xb5, (byte)0xb7, (byte)0xe4, (byte)0x28,
+            (byte)0xec, (byte)0x62, (byte)0x6b, (byte)0x6a,
+            (byte)0xd1, (byte)0xcb, (byte)0x79, (byte)0x1e,
+            (byte)0xfc, (byte)0x82, (byte)0xf5, (byte)0xb4,
+        };
+
+        // Ensure that the payload is not deserialized if the fragment offset
+        // is not zero.
+        byte proto = IPProtocols.TCP.byteValue();
+        fragmentTest(payload1, proto, (short)0xf250);
+        fragmentTest(payload2, proto, (short)0xf248);
+        fragmentTest(payload3, proto, (short)0xf230);
+
+        proto = IPProtocols.UDP.byteValue();
+        fragmentTest(payload1, proto, (short)0xf245);
+        fragmentTest(payload2, proto, (short)0xf23d);
+        fragmentTest(payload3, proto, (short)0xf225);
+
+        proto = IPProtocols.ICMP.byteValue();
+        fragmentTest(payload1, proto, (short)0xf255);
+        fragmentTest(payload2, proto, (short)0xf24d);
+        fragmentTest(payload3, proto, (short)0xf235);
+
+        // Ensure that the protocol header in the first fragment is
+        // deserialized.
+        proto = IPProtocols.TCP.byteValue();
+        TCP tcp = new TCP();
+        tcp.setSourcePort((short)1234).setDestinationPort((short)32000).
+            setSequenceNumber((int)0xd541f5f8).setAckNumber((int)0x58da787d).
+            setDataOffset((byte)5).setReserved((byte)0).
+            setHeaderLenFlags((short)0x18).setWindowSize((short)0x40e8).
+            setUrgentPointer((short)0x15f7).setChecksum((short)0x0d4e);
+        firstFragmentTest(tcp, payload1, proto, (short)0xdfe6);
+        tcp.setChecksum((short)0xab2a);
+        firstFragmentTest(tcp, payload2, proto, (short)0xdfde);
+        tcp.setChecksum((short)0x1c75);
+        firstFragmentTest(tcp, payload3, proto, (short)0xdfc6);
+
+        proto = IPProtocols.UDP.byteValue();
+        UDP udp = new UDP();
+        udp.setSourcePort((short)53).setDestinationPort((short)45383).
+            setLength((short)(payload1.length + 8)).setChecksum((short)0);
+        firstFragmentTest(udp, payload1, proto, (short)0xdfe7);
+        udp.setLength((short)(payload2.length + 8));
+        firstFragmentTest(udp, payload2, proto, (short)0xdfdf);
+        udp.setLength((short)(payload3.length + 8));
+        firstFragmentTest(udp, payload3, proto, (short)0xdfc7);
+
+        proto = IPProtocols.ICMP.byteValue();
+        ICMP icmp = new ICMP();
+        icmp.setType((byte)8).setCode((byte)0).setIdentifier((short)0x3d1e).
+            setSequenceNumber((short)1);
+        firstFragmentTest(icmp, payload1, proto, (short)0xdff7);
+        firstFragmentTest(icmp, payload2, proto, (short)0xdfef);
+        firstFragmentTest(icmp, payload3, proto, (short)0xdfd7);
+    }
+
+    private void fragmentTest(byte[] payload, byte proto, short checksum)
+        throws Exception {
+        // Construct a fragmented raw IPv4 packet.
+        int ipv4Len = 20;
+        byte[] rawIp = new byte[ipv4Len + payload.length];
+
+        byte ipVersion = 4;
+        byte dscp = 35;
+        byte ecn = 2;
+        byte tos = (byte)((dscp << 2) | ecn);
+        short totalLen = (short)rawIp.length;
+        short id = 22143;
+        short offset = 0xb9;
+        byte ttl = 64;
+        byte[] srcIp = {(byte)0x0a, (byte)0x00, (byte)0x00, (byte)0x01};
+        byte[] dstIp = {(byte)0xc0, (byte)0xa9, (byte)0x66, (byte)0x23};
+
+        rawIp[0] = (byte)((ipVersion << 4) | (ipv4Len >> 2));
+        rawIp[1] = tos;
+        rawIp[2] = (byte)(totalLen >>> Byte.SIZE);
+        rawIp[3] = (byte)totalLen;
+        rawIp[4] = (byte)(id >>> Byte.SIZE);
+        rawIp[5] = (byte)id;
+        rawIp[6] = (byte)(offset >>> Byte.SIZE);
+        rawIp[7] = (byte)offset;
+        rawIp[8] = ttl;
+        rawIp[9] = proto;
+        rawIp[10] = (byte)(checksum >>> Byte.SIZE);
+        rawIp[11] = (byte)checksum;
+        System.arraycopy(srcIp, 0, rawIp, 12, srcIp.length);
+        System.arraycopy(dstIp, 0, rawIp, 16, srcIp.length);
+        System.arraycopy(payload, 0, rawIp, ipv4Len, payload.length);
+
+        // Deserialize.
+        IPv4 ipv4 = new IPv4();
+        ipv4.deserialize(rawIp, 0, rawIp.length * Byte.SIZE);
+
+        Assert.assertEquals(ipVersion, ipv4.getVersion());
+        Assert.assertEquals(ipv4Len, ipv4.getHeaderLen());
+        Assert.assertEquals(dscp, ipv4.getDiffServ());
+        Assert.assertEquals(ecn, ipv4.getECN());
+        Assert.assertEquals(totalLen, ipv4.getTotalLength());
+        Assert.assertEquals(id, ipv4.getIdentification());
+        Assert.assertEquals((byte)0, ipv4.getFlags());
+        Assert.assertEquals(offset, ipv4.getFragmentOffset());
+        Assert.assertEquals(ttl, ipv4.getTtl());
+        Assert.assertEquals(proto, ipv4.getProtocol());
+        Assert.assertEquals(checksum, ipv4.getChecksum());
+        Assert.assertEquals(NetUtils.byteArray4ToInt(srcIp),
+                            ipv4.getSourceAddress());
+        Assert.assertEquals(NetUtils.byteArray4ToInt(dstIp),
+                            ipv4.getDestinationAddress());
+        Assert.assertFalse(ipv4.isCorrupted());
+
+        // payloadClass should not be set if fragment offset is not zero.
+        Assert.assertEquals(null, ipv4.getPayload());
+        Assert.assertArrayEquals(payload, ipv4.getRawPayload());
+    }
+
+    private void firstFragmentTest(Packet payload, byte[] rawPayload,
+                                   byte proto, short checksum)
+        throws Exception {
+        // Construct a raw IPv4 packet with MF flag.
+        int ipv4Len = 20;
+        payload.setRawPayload(rawPayload);
+        byte[] payloadBytes = payload.serialize();
+        byte[] rawIp = new byte[ipv4Len + payloadBytes.length];
+
+        byte ipVersion = 4;
+        byte dscp = 13;
+        byte ecn = 1;
+        byte tos = (byte)((dscp << 2) | ecn);
+        short totalLen = (short)rawIp.length;
+        short id = 19834;
+        byte flags = 0x1;
+        short offset = 0;
+        short off = (short)(((short)flags << 13) | offset);
+        byte ttl = 64;
+        byte[] srcIp = {(byte)0xac, (byte)0x23, (byte)0x5b, (byte)0xfd};
+        byte[] dstIp = {(byte)0xc0, (byte)0xa8, (byte)0x64, (byte)0x71};
+
+        rawIp[0] = (byte)((ipVersion << 4) | (ipv4Len >> 2));
+        rawIp[1] = tos;
+        rawIp[2] = (byte)(totalLen >>> Byte.SIZE);
+        rawIp[3] = (byte)totalLen;
+        rawIp[4] = (byte)(id >>> Byte.SIZE);
+        rawIp[5] = (byte)id;
+        rawIp[6] = (byte)(off >>> Byte.SIZE);
+        rawIp[7] = (byte)off;
+        rawIp[8] = ttl;
+        rawIp[9] = proto;
+        rawIp[10] = (byte)(checksum >>> Byte.SIZE);
+        rawIp[11] = (byte)checksum;
+        System.arraycopy(srcIp, 0, rawIp, 12, srcIp.length);
+        System.arraycopy(dstIp, 0, rawIp, 16, srcIp.length);
+        System.arraycopy(payloadBytes, 0, rawIp, ipv4Len, payloadBytes.length);
+
+        // Deserialize.
+        IPv4 ipv4 = new IPv4();
+        ipv4.deserialize(rawIp, 0, rawIp.length * Byte.SIZE);
+
+        Assert.assertEquals(ipVersion, ipv4.getVersion());
+        Assert.assertEquals(ipv4Len, ipv4.getHeaderLen());
+        Assert.assertEquals(dscp, ipv4.getDiffServ());
+        Assert.assertEquals(ecn, ipv4.getECN());
+        Assert.assertEquals(totalLen, ipv4.getTotalLength());
+        Assert.assertEquals(id, ipv4.getIdentification());
+        Assert.assertEquals(flags, ipv4.getFlags());
+        Assert.assertEquals(offset, ipv4.getFragmentOffset());
+        Assert.assertEquals(ttl, ipv4.getTtl());
+        Assert.assertEquals(proto, ipv4.getProtocol());
+        Assert.assertEquals(checksum, ipv4.getChecksum());
+        Assert.assertEquals(NetUtils.byteArray4ToInt(srcIp),
+                            ipv4.getSourceAddress());
+        Assert.assertEquals(NetUtils.byteArray4ToInt(dstIp),
+                            ipv4.getDestinationAddress());
+        Assert.assertFalse(ipv4.isCorrupted());
+
+        // Protocol header in the first fragment should be deserialized.
+        Assert.assertEquals(null, ipv4.getRawPayload());
+
+        Packet desPayload = ipv4.getPayload();
+        Assert.assertEquals(payload, desPayload);
+        Assert.assertFalse(desPayload.isCorrupted());
+        Assert.assertArrayEquals(rawPayload, desPayload.getRawPayload());
+    }
 }
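firstFragmentTest packs the header word as off = (flags << 13) | offset, reflecting the IPv4 layout of three flag bits above a 13-bit fragment offset (measured in 8-byte units on the wire). A tiny worked sketch:

    final class FragmentField {
        static short pack(final byte flags, final short offset) {
            return (short) ((flags << 13) | (offset & 0x1FFF));
        }

        public static void main(final String[] args) {
            // MF flag set, offset 0 -> 0x2000
            System.out.printf("0x%04x%n", pack((byte) 0x1, (short) 0) & 0xFFFF);
        }
    }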