Merge "Bug 1763: Fixed illegal state in Binding Transaction Chain."
author:    Ed Warnicke <eaw@cisco.com>    Thu, 18 Sep 2014 16:02:48 +0000 (16:02 +0000)
committer: Gerrit Code Review <gerrit@opendaylight.org>    Thu, 18 Sep 2014 16:02:48 +0000 (16:02 +0000)
286 files changed:
features/mdsal/pom.xml
features/mdsal/src/main/resources/features.xml
features/netconf/src/main/resources/features.xml
features/nsf/pom.xml
features/nsf/src/main/resources/features.xml
opendaylight/archetypes/opendaylight-configfile-archetype/pom.xml
opendaylight/archetypes/opendaylight-karaf-distro-archetype/src/main/resources/archetype-resources/pom.xml
opendaylight/commons/opendaylight/pom.xml
opendaylight/config/config-util/pom.xml
opendaylight/config/config-util/src/test/java/org/opendaylight/controller/config/util/AttributeEntryTest.java [new file with mode: 0644]
opendaylight/config/config-util/src/test/java/org/opendaylight/controller/config/util/ConfigRegistryClientsTest.java
opendaylight/config/config-util/src/test/java/org/opendaylight/controller/config/util/ConfigTransactionClientsTest.java
opendaylight/config/config-util/src/test/java/org/opendaylight/controller/config/util/TestingConfigRegistry.java
opendaylight/config/config-util/src/test/java/org/opendaylight/controller/config/util/TestingConfigTransactionController.java
opendaylight/distribution/opendaylight-karaf-resources/src/main/resources/bin/setenv [new file with mode: 0755]
opendaylight/distribution/opendaylight-karaf-resources/src/main/resources/etc/custom.properties
opendaylight/md-sal/benchmark-data-store/pom.xml [new file with mode: 0644]
opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryDatastoreWriteTransactionBenchmark.java [new file with mode: 0644]
opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/BenchmarkModel.java [new file with mode: 0644]
opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithExecutorServiceBenchmark.java [new file with mode: 0644]
opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithSameThreadedExecutorBenchmark.java [new file with mode: 0644]
opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWriteTransactionBenchmark.java [new file with mode: 0644]
opendaylight/md-sal/benchmark-data-store/src/main/resources/odl-datastore-test.yang [new file with mode: 0644]
opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/FromSalConversionsUtils.java
opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/MDFlowMapping.java
opendaylight/md-sal/compatibility/sal-compatibility/src/main/java/org/opendaylight/controller/sal/compatibility/ToSalConversionsUtils.java
opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/FromSalConversionsUtilsTest.java [new file with mode: 0644]
opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/TestFromSalConversionsUtils.java
opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/TestToSalConversionsUtils.java
opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/ToSalConversionsUtilsTest.java [new file with mode: 0644]
opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/FlowForwarder.java
opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/GroupForwarder.java
opendaylight/md-sal/forwardingrules-manager/src/main/java/org/opendaylight/controller/frm/impl/MeterForwarder.java
opendaylight/md-sal/pom.xml
opendaylight/md-sal/sal-akka-raft/pom.xml
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/TestDriver.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/DefaultConfigParamsImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/AppendEntries.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/messages/InstallSnapshot.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/FollowerTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MessageCollectorActor.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MockSnapshotStore.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/resources/application.conf
opendaylight/md-sal/sal-clustering-commons/pom.xml
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractConfig.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActor.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractUntypedActor.java with 67% similarity]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActorWithMetering.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AkkaConfigurationReader.java [moved from opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/utils/AkkaConfigurationReader.java with 87% similarity]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/CommonConfig.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/DefaultAkkaConfigurationReader.java [moved from opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/utils/DefaultAkkaConfigurationReader.java with 93% similarity]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/MeteredBoundedMailbox.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/MeteringBehavior.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/Monitor.java [moved from opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/messages/Monitor.java with 90% similarity]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/UnifiedConfig.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/CompositeModificationPayload.java [moved from opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CompositeModificationPayload.java with 95% similarity]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java [moved from opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/client/messages/Payload.java with 100% similarity]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/reporting/MetricsReporter.java [moved from opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/common/reporting/MetricsReporter.java with 90% similarity]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/common/actor/MeteredBoundedMailbox.java [deleted file]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/protobuff/messages/cluster/raft/InstallSnapshotMessages.java [moved from opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/protobuff/messages/InstallSnapshotMessages.java with 87% similarity]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/protobuff/messages/transaction/ShardTransactionChainMessages.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/protobuff/messages/transaction/ShardTransactionMessages.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/xml/codec/XmlStreamUtils.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/xml/codec/XmlUtils.java
opendaylight/md-sal/sal-clustering-commons/src/main/resources/InstallSnapshot.proto [moved from opendaylight/md-sal/sal-akka-raft/src/main/resources/InstallSnapshot.proto with 82% similarity]
opendaylight/md-sal/sal-clustering-commons/src/main/resources/ShardTransaction.proto
opendaylight/md-sal/sal-clustering-commons/src/main/resources/ShardTransactionChain.proto
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/common/actor/CommonConfigTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/common/actor/MeteredBoundedMailboxTest.java [moved from opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/common/actor/MeteredBoundedMailboxTest.java with 88% similarity]
opendaylight/md-sal/sal-clustering-commons/src/test/resources/application.conf
opendaylight/md-sal/sal-clustering-commons/src/test/resources/reference.conf
opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/05-clustering.xml.conf
opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/akka.conf
opendaylight/md-sal/sal-common-api/src/main/java/org/opendaylight/controller/md/sal/common/api/data/TransactionCommitDeadlockException.java
opendaylight/md-sal/sal-common-impl/src/main/java/org/opendaylight/controller/md/sal/common/impl/service/AbstractDataTransaction.java
opendaylight/md-sal/sal-common-util/src/main/java/org/opendaylight/controller/md/sal/common/util/jmx/ThreadExecutorStatsMXBeanImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ActorSystemFactory.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Configuration.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ConfigurationImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListener.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerRegistration.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerRegistrationProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreProperties.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardManager.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionChain.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TerminationMonitor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/AbstractBaseMBean.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStats.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStatsMBean.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseTransactionChain.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ActorContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/resources/application.conf
opendaylight/md-sal/sal-distributed-datastore/src/main/yang/distributed-datastore-provider.yang
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/AbstractActorTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/BasicIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/CompositeModificationPayloadTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ConfigurationImplTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerRegistrationProxyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionChainTest.java [deleted file]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/InMemorySnapshotStore.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/MockConfiguration.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/programs/appendentries/Client.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/programs/appendentries/Server.java
opendaylight/md-sal/sal-distributed-datastore/src/test/resources/application.conf
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/DomInmemoryDataBrokerModule.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/HashMapDataStoreModule.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/HashMapDataStoreModuleFactory.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMForwardedCompositeTransaction.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/AbstractDOMForwardedTransactionFactory.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataBrokerImpl.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataBrokerTransactionChainImpl.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataCommitCoordinatorImpl.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadOnlyTransaction.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadWriteTransaction.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedWriteTransaction.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/HashMapDataStore.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/HashMapDataStoreTransaction.java [deleted file]
opendaylight/md-sal/sal-dom-broker/src/main/yang/opendaylight-dom-broker-impl.yang
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMBrokerTest.java
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/jmx/CommitStatsMXBeanImplTest.java
opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/controller/md/sal/dom/xsql/XSQLAdapter.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStore.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreFactory.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/ResolveDataChangeEventsTask.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/ResolveDataChangeState.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedReadWriteTransaction.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedWriteTransaction.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/jmx/InMemoryDataStoreStats.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/listener/NetconfSessionCapabilities.java
opendaylight/md-sal/sal-netconf-connector/src/test/java/org/opendaylight/controller/sal/connect/netconf/listener/NetconfSessionCapabilitiesTest.java
opendaylight/md-sal/sal-remoterpc-connector/pom.xml
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/config/yang/config/remote_rpc_connector/RemoteRPCBrokerModule.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/AbstractUntypedActor.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/ActorConstants.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/ActorSystemFactory.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcImplementation.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProvider.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderConfig.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderFactory.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RoutedRpcListener.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcBroker.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcListener.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcManager.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/TerminationMonitor.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistry.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketImpl.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStore.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/utils/ActorUtil.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/main/resources/application.conf
opendaylight/md-sal/sal-remoterpc-connector/src/main/yang/remote-rpc-connector.yang
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/AbstractRpcTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/ActorSystemFactoryTest.java [deleted file]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteRpcImplementationTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderConfigTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistryTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/resources/application.conf
opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/rest/api/RestconfService.java
opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/rest/impl/NormalizedNodeJsonBodyWriter.java
opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/rest/impl/RestconfDocumentedExceptionMapper.java
opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/restconf/impl/RestconfImpl.java
opendaylight/md-sal/samples/clustering-test-app/configuration/pom.xml [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/20-clustering-test-app.xml [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/module-shards.conf [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/modules.conf [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/model/pom.xml [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-people.yang [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-purchase.yang [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car.yang [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/people.yang [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/pom.xml [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/provider/pom.xml [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/listener/PeopleCarListener.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PeopleProvider.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PurchaseCarProvider.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/config/yang/config/clustering_it_provider/ClusteringItProviderModule.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/config/yang/config/clustering_it_provider/ClusteringItProviderModuleFactory.java [new file with mode: 0644]
opendaylight/md-sal/samples/clustering-test-app/provider/src/main/yang/clustering-it-provider.yang [new file with mode: 0644]
opendaylight/md-sal/samples/pom.xml
opendaylight/md-sal/topology-manager/pom.xml
opendaylight/md-sal/topology-manager/src/main/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporter.java
opendaylight/md-sal/topology-manager/src/main/java/org/opendaylight/md/controller/topology/manager/OperationProcessor.java
opendaylight/md-sal/topology-manager/src/test/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporterTest.java [new file with mode: 0644]
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/mapping/attributes/fromxml/ObjectNameAttributeReadingStrategy.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/mapping/attributes/fromxml/SimpleIdentityRefAttributeReadingStrategy.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/mapping/config/Config.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/mapping/config/ServiceRegistryWrapper.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/mapping/config/Services.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/operations/editconfig/EditConfig.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/operations/get/Get.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/operations/runtimerpc/RuntimeRpc.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/transactions/TransactionProvider.java
opendaylight/netconf/config-persister-impl/src/main/java/org/opendaylight/controller/netconf/persist/impl/ConfigPersisterNotificationHandler.java
opendaylight/netconf/config-persister-impl/src/main/java/org/opendaylight/controller/netconf/persist/impl/ConfigPusherImpl.java
opendaylight/netconf/config-persister-impl/src/main/java/org/opendaylight/controller/netconf/persist/impl/PersisterAggregator.java
opendaylight/netconf/config-persister-impl/src/test/java/org/opendaylight/controller/netconf/persist/impl/ConfigPersisterNotificationHandlerTest.java [new file with mode: 0644]
opendaylight/netconf/config-persister-impl/src/test/java/org/opendaylight/controller/netconf/persist/impl/ConfigPersisterNotificationListenerTest.java [new file with mode: 0644]
opendaylight/netconf/config-persister-impl/src/test/java/org/opendaylight/controller/netconf/persist/impl/PersisterAggregatorTest.java
opendaylight/netconf/netconf-client/pom.xml
opendaylight/netconf/netconf-client/src/main/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiator.java
opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientConfigurationTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientDispatcherImplTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiatorFactoryTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiatorTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfReconnectingClientConfigurationTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/SimpleNetconfClientSessionListenerTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/SshClientChannelInitializerTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/TcpClientChannelInitializerTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/TestingNetconfClient.java [moved from opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/test/TestingNetconfClient.java with 92% similarity]
opendaylight/netconf/netconf-impl/src/main/java/org/opendaylight/controller/netconf/impl/osgi/NetconfMonitoringServiceImpl.java
opendaylight/netconf/netconf-impl/src/main/java/org/opendaylight/controller/netconf/impl/osgi/NetconfOperationRouterImpl.java
opendaylight/netconf/netconf-impl/src/test/java/org/opendaylight/controller/netconf/impl/ConcurrentClientsTest.java
opendaylight/netconf/netconf-it/pom.xml
opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfConfigPersisterITTest.java
opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITMonitoringTest.java
opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITSecureTest.java
opendaylight/netconf/netconf-it/src/test/java/org/opendaylight/controller/netconf/it/NetconfITTest.java
opendaylight/netconf/netconf-it/src/test/resources/logback-test.xml
opendaylight/netconf/netconf-monitoring/src/main/java/org/opendaylight/controller/netconf/monitoring/xml/model/MonitoringSchema.java
opendaylight/netconf/netconf-monitoring/src/test/java/org/opendaylight/controller/netconf/monitoring/xml/JaxBSerializerTest.java
opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/EXIParameters.java
opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/NetconfStartExiMessage.java
opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandler.java
opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/AbstractChannelInitializerTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/AbstractNetconfSessionTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/NetconfEXIHandlersTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/NetconfXMLToHelloMessageDecoderTest.java
opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/EXIParametersTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/NetconfStartExiMessageTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandlerTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-ssh/src/main/java/org/opendaylight/controller/netconf/ssh/osgi/NetconfSSHActivator.java
opendaylight/netconf/netconf-ssh/src/test/java/org/opendaylight/controller/netconf/netty/SSHTest.java
opendaylight/netconf/netconf-testtool/src/main/java/org/opendaylight/controller/netconf/test/tool/NetconfDeviceSimulator.java
opendaylight/netconf/netconf-usermanager/src/main/java/org/opendaylight/controller/netconf/auth/usermanager/AuthProviderImpl.java
opendaylight/netconf/netconf-util/pom.xml
opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/mapping/AbstractNetconfOperation.java
opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessage.java
opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageUtil.java
opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/osgi/NetconfConfigUtil.java
opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/xml/XmlElement.java
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/CloseableUtilTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractLastNetconfOperationTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractNetconfOperationTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractSingletonNetconfOperationTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessageAdditionalHeaderTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessageTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageHeaderTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageUtilTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/SendErrorExceptionUtilTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/osgi/NetconfConfigUtilTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/HardcodedNamespaceResolverTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/XmlElementTest.java [new file with mode: 0644]
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/XmlUtilTest.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPool.java
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPoolMember.java
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerNorthbound.java
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolMemberRequest.java [moved from opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/INeutronLoadBalancerPoolMemberRequest.java with 82% similarity]
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolMembersNorthbound.java
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolNorthbound.java
opendaylight/sal/api/src/main/java/org/opendaylight/controller/sal/packet/ICMP.java
opendaylight/sal/api/src/main/java/org/opendaylight/controller/sal/packet/IPv4.java
opendaylight/sal/api/src/test/java/org/opendaylight/controller/sal/packet/ICMPTest.java
opendaylight/sal/api/src/test/java/org/opendaylight/controller/sal/packet/IPv4Test.java
opendaylight/topologymanager/implementation/src/main/java/org/opendaylight/controller/topologymanager/internal/TopologyManagerImpl.java
opendaylight/topologymanager/implementation/src/test/java/org/opendaylight/controller/topologymanager/internal/TopologyManagerImplTest.java

index 9b81f81ae4ce7cb9e3f35c8aba51c08a20347dd4..f45f680c3069032601bab3ef03196cc06150ec7d 100644 (file)
@@ -13,6 +13,7 @@
 
   <properties>
     <features.file>features.xml</features.file>
+    <org.json.version>20131018</org.json.version>
   </properties>
 
   <dependencies>
       <type>xml</type>
       <classifier>config</classifier>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>clustering-it-model</artifactId>
+      <version>${mdsal.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>clustering-it-provider</artifactId>
+      <version>${mdsal.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>clustering-it-config</artifactId>
+      <version>${mdsal.version}</version>
+      <type>xml</type>
+      <classifier>config</classifier>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>clustering-it-config</artifactId>
+      <version>${mdsal.version}</version>
+      <type>xml</type>
+      <classifier>testmoduleshardconf</classifier>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>clustering-it-config</artifactId>
+      <version>${mdsal.version}</version>
+      <type>xml</type>
+      <classifier>testmoduleconf</classifier>
+    </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
       <artifactId>sal-rest-docgen</artifactId>
index 132337828e80d9cf28618202855aa06cb3217548..0e24176b9fd77f239b32c5efcd234fce50f55d3e 100644 (file)
     <repository>mvn:org.opendaylight.controller/features-akka/${commons.opendaylight.version}/xml/features</repository>
     <feature name='odl-mdsal-all' version='${project.version}' description="OpenDaylight :: MDSAL :: All">
         <feature version='${project.version}'>odl-mdsal-broker</feature>
+        <feature version='${project.version}'>odl-mdsal-clustering</feature>
         <feature version='${project.version}'>odl-restconf</feature>
         <feature version='${project.version}'>odl-mdsal-xsql</feature>
-        <feature version='${project.version}'>odl-mdsal-clustering</feature>
         <feature version='${project.version}'>odl-toaster</feature>
     </feature>
     <feature name='odl-mdsal-broker' version='${project.version}' description="OpenDaylight :: MDSAL :: Broker">
         <feature version='${yangtools.version}'>odl-yangtools-common</feature>
         <feature version='${yangtools.version}'>odl-yangtools-binding</feature>
+        <feature version='${yangtools.version}'>odl-yangtools-models</feature>
         <feature version='${mdsal.version}'>odl-mdsal-common</feature>
         <feature version='${config.version}'>odl-config-startup</feature>
         <feature version='${config.version}'>odl-config-netty</feature>
@@ -35,6 +36,9 @@
     <feature name='odl-restconf' version='${project.version}' description="OpenDaylight :: Restconf">
         <feature version='${mdsal.version}'>odl-mdsal-broker</feature>
         <feature>war</feature>
+        <!-- presently we need sal-remote to be listed BEFORE sal-rest-connector because sal-rest-connector
+             has a yang file which augments a yang file in sal-remote, and order seems to matter -->
+        <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
         <bundle>mvn:org.opendaylight.controller/sal-rest-connector/${project.version}</bundle>
         <bundle>mvn:com.google.code.gson/gson/${gson.version}</bundle>
         <bundle>mvn:org.opendaylight.yangtools/yang-data-codec-gson/${yangtools.version}</bundle>
@@ -47,7 +51,6 @@
         <bundle>mvn:io.netty/netty-common/${netty.version}</bundle>
         <bundle>mvn:io.netty/netty-handler/${netty.version}</bundle>
         <bundle>mvn:io.netty/netty-transport/${netty.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
         <configfile finalname="${config.configfile.directory}/${config.restconf.configfile}">mvn:org.opendaylight.controller/sal-rest-connector-config/${mdsal.version}/xml/config</configfile>
     </feature>
     <feature name='odl-toaster' version='${project.version}' description="OpenDaylight :: Toaster">
         <configfile finalname="configuration/initial/module-shards.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleshardconf</configfile>
         <configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
     </feature>
+
+    <feature name='odl-clustering-test-app' version='${project.version}'>
+        <feature version='${project.version}'>odl-mdsal-clustering</feature>
+        <feature version='${project.version}'>odl-restconf</feature>
+        <feature version='${yangtools.version}'>odl-yangtools-models</feature>
+        <bundle>mvn:org.opendaylight.controller.samples/clustering-it-model/${project.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller.samples/clustering-it-provider/${project.version}</bundle>
+        <configfile finalname="${config.configfile.directory}/20-clustering-test-app.xml">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/config</configfile>
+        <configfile finalname="configuration/initial/module-shards.conf" override="true" >mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleshardconf</configfile>
+        <configfile finalname="configuration/initial/modules.conf" override="true">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/testmoduleconf</configfile>
+    </feature>
 </features>
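
For readability, this is roughly how the odl-restconf feature reads once the two hunks above are applied (a sketch assembled from the +/- lines; the netty and other unchanged bundles are elided). It makes the ordering constraint from the in-line comment visible: sal-remote now precedes sal-rest-connector, whose yang file augments one in sal-remote.

    <feature name='odl-restconf' version='${project.version}' description="OpenDaylight :: Restconf">
        <feature version='${mdsal.version}'>odl-mdsal-broker</feature>
        <feature>war</feature>
        <!-- sal-remote must be listed BEFORE sal-rest-connector because sal-rest-connector
             has a yang file which augments a yang file in sal-remote, and order seems to matter -->
        <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
        <bundle>mvn:org.opendaylight.controller/sal-rest-connector/${project.version}</bundle>
        <bundle>mvn:com.google.code.gson/gson/${gson.version}</bundle>
        <bundle>mvn:org.opendaylight.yangtools/yang-data-codec-gson/${yangtools.version}</bundle>
        <!-- ... netty bundles unchanged ... -->
        <configfile finalname="${config.configfile.directory}/${config.restconf.configfile}">mvn:org.opendaylight.controller/sal-rest-connector-config/${mdsal.version}/xml/config</configfile>
    </feature>
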
index 743dae663e5f871e4d50f3e57ce86593e0742bcb..444f20865b565d48d1dd2e55d6b3362db051cf76 100644 (file)
@@ -11,8 +11,6 @@
     <feature version='${project.version}'>odl-netconf-mapping-api</feature>
     <feature version='${project.version}'>odl-netconf-util</feature>
     <feature version='${project.version}'>odl-netconf-impl</feature>
-    <feature version='${project.version}'>odl-netconf-tcp</feature>
-    <feature version='${project.version}'>odl-netconf-ssh</feature>
     <feature version='${project.version}'>odl-config-netconf-connector</feature>
     <feature version='${project.version}'>odl-netconf-netty-util</feature>
     <feature version='${project.version}'>odl-netconf-client</feature>
index 875ca2ca617d26b9246f6eda27914daa9c87d358..e677d491bc1a226fce9d0ca55e2754c993d57d7e 100644 (file)
       <groupId>org.opendaylight.controller.thirdparty</groupId>
       <artifactId>net.sf.jung2</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.eclipse.persistence</groupId>
+      <artifactId>org.eclipse.persistence.antlr</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.persistence</groupId>
+      <artifactId>org.eclipse.persistence.core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.persistence</groupId>
+      <artifactId>org.eclipse.persistence.moxy</artifactId>
+    </dependency>
   </dependencies>
   <build>
     <resources>
index 8dc51f1644c48dd31e9bf911681ea6819d7dc32b..e8f7bc1e5c8b9522c16d8851ae26393bc4af2225 100644 (file)
@@ -67,6 +67,9 @@
         <bundle>mvn:org.opendaylight.controller/flowprogrammer.northbound/${flowprogrammer.northbound.version}</bundle>
         <bundle>mvn:org.opendaylight.controller/hosttracker.northbound/${hosttracker.northbound.version}</bundle>
         <bundle>mvn:org.opendaylight.controller/networkconfig.bridgedomain.northbound/${networkconfig.bridgedomain.northbound.version}</bundle>
+        <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.antlr/${eclipse.persistence.version}</bundle>
+        <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.core/${eclipse.persistence.version}</bundle>
+        <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/${eclipse.persistence.version}</bundle>
         <bundle>mvn:org.opendaylight.controller/networkconfig.neutron.northbound/${networkconfig.neutron.northbound.version}</bundle>
         <bundle>mvn:org.opendaylight.controller/forwarding.staticrouting.northbound/${forwarding.staticrouting.northbound.version}</bundle>
         <bundle>mvn:org.opendaylight.controller/statistics.northbound/${statistics.northbound.version}</bundle>
index 38c86164e9b88067847f0d8012965c0f3aba154c..56342218a0b8205555961832f60d3fca4846a809 100644 (file)
   <distributionManagement>
     <repository>
       <id>opendaylight-release</id>
-      <url>http://nexus.opendaylight.org/content/repositories/opendaylight.release/</url>
+      <url>${nexusproxy}/repositories/opendaylight.release/</url>
     </repository>
     <snapshotRepository>
       <id>opendaylight-snapshot</id>
-      <url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+      <url>${nexusproxy}/repositories/opendaylight.snapshot/</url>
     </snapshotRepository>
     <site>
       <id>website</id>
-      <url>dav:http://nexus.opendaylight.org/content/sites/site/sal-parent</url>
+      <url>dav:${nexusproxy}/sites/site/sal-parent</url>
     </site>
   </distributionManagement>
 </project>
index 965c61969558b3dd5524dae38e8ab0a374663fb3..fdc60625c85536a4c22ab52836bed884aa4e8696 100644 (file)
              <ignorePermissions>false</ignorePermissions>
             </configuration>
           </execution>
+          <execution>
+            <id>copy-dependencies</id>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <outputDirectory>${project.build.directory}/assembly/system</outputDirectory>
+              <overWriteReleases>false</overWriteReleases>
+              <overWriteSnapshots>true</overWriteSnapshots>
+              <overWriteIfNewer>true</overWriteIfNewer>
+              <useRepositoryLayout>true</useRepositoryLayout>
+              <addParentPoms>true</addParentPoms>
+              <copyPom>true</copyPom>
+            </configuration>
+          </execution>
         </executions>
       </plugin>
       <plugin>
index 2e817b97f36a85eebc9a0256284221f60b2b61a3..e3ffbf356aeecb541b76da89a4c40f450016900d 100644 (file)
     <sonar.language>java</sonar.language>
     <sonar.jacoco.reportPath>target/code-coverage/jacoco.exec</sonar.jacoco.reportPath>
     <sonar.jacoco.itReportPath>target/code-coverage/jacoco-it.exec</sonar.jacoco.itReportPath>
-    <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages</sonar.skippedModules>
+    <sonar.skippedModules>org.openflow.openflowj,net.sf.jung2,org.opendaylight.controller.protobuff.messages,ch.ethz.ssh2</sonar.skippedModules>
+    <sonar.profile>Sonar way with Findbugs</sonar.profile>
     <spifly.version>1.0.0</spifly.version>
     <spring-osgi.version>1.2.1</spring-osgi.version>
     <spring-security-karaf.version>3.1.4.RELEASE</spring-security-karaf.version>
     <yang-ext.version>2013.09.07.4-SNAPSHOT</yang-ext.version>
     <yang-jmx-generator.version>1.0.0-SNAPSHOT</yang-jmx-generator.version>
     <yangtools.version>0.6.2-SNAPSHOT</yangtools.version>
-      <sshd-core.version>0.12.0</sshd-core.version>
+    <sshd-core.version>0.12.0</sshd-core.version>
+    <jmh.version>0.9.7</jmh.version>
   </properties>
 
   <dependencyManagement>
         <type>xml</type>
         <scope>runtime</scope>
       </dependency>
+      <!-- JMH Benchmark dependencies -->
+      <dependency>
+        <groupId>org.openjdk.jmh</groupId>
+        <artifactId>jmh-core</artifactId>
+        <version>${jmh.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.openjdk.jmh</groupId>
+        <artifactId>jmh-generator-annprocess</artifactId>
+        <version>${jmh.version}</version>
+      </dependency>
     </dependencies>
   </dependencyManagement>
 
   <repositories>
 
     <!-- OpenDayLight Repo Mirror -->
+    <!-- NOTE: URLs need to be hardcoded in the repository section because we have
+         parent poms that do NOT exist in this project and thus need to be pulled
+         down from the repository. To override these URLs you should use the
+         mirror section in your local settings.xml file. -->
     <repository>
       <releases>
         <enabled>true</enabled>
       <url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
     </pluginRepository>
   </pluginRepositories>
+
+  <!-- distribution management only runs when you run mvn deploy
+       which is if you are deploying compiled artifacts to a
+       maven repository. In that case logic dictates that you already
+       compiled and thus already have the necessary parent pom files
+       that do not exist in this project pulled down to your local
+       .m2. That way the variables can be resolved and artifacts can
+       be uploaded when running mvn deploy. -->
   <distributionManagement>
     <!-- OpenDayLight Released artifact -->
     <repository>
       <id>opendaylight-release</id>
-      <url>http://nexus.opendaylight.org/content/repositories/opendaylight.release/</url>
+      <url>${nexusproxy}/repositories/opendaylight.release/</url>
     </repository>
     <!-- OpenDayLight Snapshot artifact -->
     <snapshotRepository>
       <id>opendaylight-snapshot</id>
-      <url>http://nexus.opendaylight.org/content/repositories/opendaylight.snapshot/</url>
+      <url>${nexusproxy}/repositories/opendaylight.snapshot/</url>
     </snapshotRepository>
     <!-- Site deployment -->
     <site>
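
The NOTE in the repositories section above explains why the repository URLs stay hardcoded (parent poms must be fetchable before any properties can resolve), while the distributionManagement URLs now go through ${nexusproxy}. Builders who want to pull from a different Nexus would therefore override the URLs in their local settings.xml rather than in the poms; a minimal sketch, with a placeholder mirror id and URL that are not part of this change:

    <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0">
      <mirrors>
        <mirror>
          <!-- hypothetical local proxy of the OpenDaylight Nexus -->
          <id>local-odl-mirror</id>
          <mirrorOf>*</mirrorOf>
          <url>http://nexus.example.org/content/groups/public/</url>
        </mirror>
      </mirrors>
    </settings>
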
index fd9c1b91e3944146b0d1ad9fb63f78acc5b2daff..29a5526451f688e3409969f8cb85b041d7496f0a 100644 (file)
       <artifactId>guava</artifactId>
       <scope>test</scope>
     </dependency>
+  <dependency>
+    <groupId>org.opendaylight.yangtools</groupId>
+    <artifactId>mockito-configuration</artifactId>
+  </dependency>
   </dependencies>
 
   <build>
diff --git a/opendaylight/config/config-util/src/test/java/org/opendaylight/controller/config/util/AttributeEntryTest.java b/opendaylight/config/config-util/src/test/java/org/opendaylight/controller/config/util/AttributeEntryTest.java
new file mode 100644 (file)
index 0000000..b2afd35
--- /dev/null
@@ -0,0 +1,32 @@
+package org.opendaylight.controller.config.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class AttributeEntryTest {
+
+    private AttributeEntry attributeEntryClient;
+    private final String key = "myKey";
+    private final String description = "myDescription";
+    private final String type = "myType";
+    private final boolean boolValue = false;
+
+    @Before
+    public void setUp() throws Exception {
+        attributeEntryClient = new AttributeEntry("myKey", "myDescription", null, "myType", false);
+    }
+
+    @Test
+    public void testAttributeEntryGetters() throws Exception{
+        assertEquals(key, attributeEntryClient.getKey());
+        assertEquals(description, attributeEntryClient.getDescription());
+        final Object value = attributeEntryClient.getValue();
+        assertNull(value);
+        assertEquals(type, attributeEntryClient.getType());
+        assertEquals(boolValue, attributeEntryClient.isRw());
+    }
+}
index 13043458c0d097d0f6e936343480e77b63ad47be..0524f0019a1414aa3282f515005b3e10df38aebd 100644 (file)
@@ -7,19 +7,28 @@
  */
 package org.opendaylight.controller.config.util;
 
-import com.google.common.collect.Sets;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.opendaylight.controller.config.api.ConfigRegistry;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.matchers.JUnitMatchers.hasItem;
+
+import java.lang.management.ManagementFactory;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
 
 import javax.management.InstanceNotFoundException;
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
-import java.lang.management.ManagementFactory;
-import java.util.Set;
 
-import static org.junit.Assert.assertEquals;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.config.api.ConfigRegistry;
+
+import com.google.common.collect.Sets;
 
 public class ConfigRegistryClientsTest {
 
@@ -27,6 +36,8 @@ public class ConfigRegistryClientsTest {
     private ObjectName testingRegistryON;
     private final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
     private ConfigRegistryClient jmxRegistryClient;
+    private ConfigTransactionClient jmxTransactionClient;
+    private Map<String, ObjectName> map;
 
     @Before
     public void setUp() throws Exception {
@@ -35,6 +46,7 @@ public class ConfigRegistryClientsTest {
         mbs.registerMBean(testingRegistry, testingRegistryON);
         jmxRegistryClient = new ConfigRegistryJMXClient(
                 ManagementFactory.getPlatformMBeanServer());
+        map = new HashMap<>();
     }
 
     @After
@@ -44,6 +56,89 @@ public class ConfigRegistryClientsTest {
         }
     }
 
+    @Test
+    public void testCreateTransaction() throws Exception{
+        jmxTransactionClient = jmxRegistryClient.createTransaction();
+        assertNotNull(jmxTransactionClient);
+    }
+
+    @Test
+    public void testGetConfigTransactionClient2() throws Exception{
+        jmxTransactionClient = jmxRegistryClient.getConfigTransactionClient("transactionName");
+        assertNotNull(jmxTransactionClient);
+    }
+
+    @Test
+    public void testGetConfigTransactionClient() throws Exception{
+        jmxTransactionClient = jmxRegistryClient.getConfigTransactionClient(testingRegistryON);
+        assertNotNull(jmxTransactionClient);
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testNewMXBeanProxy() throws Exception{
+        if (jmxRegistryClient instanceof ConfigRegistryJMXClient) {
+            ConfigRegistryJMXClient client = (ConfigRegistryJMXClient) jmxRegistryClient;
+            assertNull(client.newMXBeanProxy(testingRegistryON, String.class));
+        } else {
+            throw new AssertionError("brm msg");
+        }
+    }
+
+    @Test
+    public void testBeginConfig() throws Exception{
+        Assert.assertNotNull(jmxRegistryClient.beginConfig());
+    }
+
+    @Test
+    public void testCommitConfig() throws Exception{
+        assertNull(jmxRegistryClient.commitConfig(testingRegistryON));
+    }
+
+    @Test
+    public void testGetOpenConfigs() throws Exception{
+        assertNull(jmxRegistryClient.getOpenConfigs());
+    }
+
+    @Test(expected = RuntimeException.class)
+    public void testGetVersion() throws Exception{
+        assertEquals(3, jmxRegistryClient.getVersion());
+    }
+
+    @Test
+    public void testGetAvailableModuleNames() throws Exception{
+        assertNull(jmxRegistryClient.getAvailableModuleNames());
+    }
+
+    @Test
+    public void testIsHealthy() throws Exception{
+        assertEquals(false, jmxRegistryClient.isHealthy());
+    }
+
+    @Test
+    public void testLookupConfigBeans3() throws Exception{
+        Set<ObjectName> son = jmxRegistryClient.lookupConfigBeans();
+        assertEquals(3, son.size());
+    }
+
+    @Test
+    public void testLookupConfigBeans2() throws Exception{
+        Set<ObjectName> son = jmxRegistryClient.lookupConfigBeans(TestingConfigRegistry.moduleName1);
+        assertEquals(2, son.size());
+    }
+
+    @Test
+    public void testLookupConfigBeans() throws Exception{
+        Set<ObjectName> son = jmxRegistryClient.lookupConfigBeans(TestingConfigRegistry.moduleName1, TestingConfigRegistry.instName1);
+        Set<ObjectName> on = Sets.newHashSet(TestingConfigRegistry.conf2);
+        assertEquals(on, son);
+    }
+
+    @Test
+    public void testLookupConfigBean() throws Exception{
+        ObjectName on = jmxRegistryClient.lookupConfigBean(TestingConfigRegistry.moduleName1, null);
+        assertEquals(TestingConfigRegistry.conf3, on);
+    }
+
     @Test
     public void testLookupRuntimeBeans() throws Exception {
         Set<ObjectName> jmxLookup = lookupRuntimeBeans(jmxRegistryClient);
@@ -91,4 +186,78 @@ public class ConfigRegistryClientsTest {
         }
         return beans;
     }
+
+    @Test
+    public void testCheckConfigBeanExists() throws Exception{
+        jmxRegistryClient.checkConfigBeanExists(testingRegistryON);
+        assertEquals(true, TestingConfigRegistry.checkBool);
+    }
+
+    @Test
+    public void testLookupConfigBeanByServiceInterfaceName() throws Exception{
+        ObjectName on = clientLookupConfigBeanByServiceInterfaceName();
+        assertEquals(TestingConfigRegistry.conf1, on);
+    }
+
+    private ObjectName clientLookupConfigBeanByServiceInterfaceName(){
+        return jmxRegistryClient.lookupConfigBeanByServiceInterfaceName("qnameA", "refA");
+    }
+
+    @Test
+    public void testGetServiceMapping() throws Exception{
+        assertNull(jmxRegistryClient.getServiceMapping());
+    }
+
+    @Test
+    public void testLookupServiceReferencesByServiceInterfaceName() throws Exception{
+        map.put("conf2", TestingConfigRegistry.conf2);
+        assertEquals(map, jmxRegistryClient.lookupServiceReferencesByServiceInterfaceName("qnameB"));
+    }
+
+    @Test
+    public void testLookupServiceInterfaceNames() throws Exception{
+        assertThat(clientLookupServiceInterfaceNames(testingRegistryON), hasItem(TestingConfigRegistry.serviceQName1));
+        assertThat(clientLookupServiceInterfaceNames(testingRegistryON), hasItem(TestingConfigRegistry.serviceQName2));
+    }
+
+    private Set<String> clientLookupServiceInterfaceNames(ObjectName client) throws InstanceNotFoundException{
+        return jmxRegistryClient.lookupServiceInterfaceNames(client);
+    }
+
+    @Test
+    public void testGetServiceInterfaceName() throws Exception{
+        assertNull(jmxRegistryClient.getServiceInterfaceName(null, null));
+    }
+
+    @Test(expected = RuntimeException.class)
+    public void testInvokeMethod() throws Exception{
+        assertNull(jmxRegistryClient.invokeMethod(testingRegistryON, "name", null, null));
+    }
+
+    @Test(expected = RuntimeException.class)
+    public void testGetAttributeCurrentValue() throws Exception{
+        assertNull(jmxRegistryClient.getAttributeCurrentValue(testingRegistryON, "attrName"));
+    }
+
+    @Test
+    public void testGetAvailableModuleFactoryQNames() throws Exception{
+        for (String str : jmxRegistryClient.getAvailableModuleFactoryQNames()) {
+            if (!TestingConfigRegistry.moduleName1.equals(str)) {
+                assertEquals(TestingConfigRegistry.moduleName2, str);
+            }
+            else {
+                assertEquals(TestingConfigRegistry.moduleName1, str);
+            }
+        }
+    }
+
+    @Test
+    public void testGetServiceReference() throws Exception{
+        Assert.assertNotNull(jmxRegistryClient.getServiceReference(null, null));
+    }
+
+    @Test(expected = UnsupportedOperationException.class)
+    public void testCheckServiceReferenceExists() throws Exception{
+        jmxRegistryClient.checkServiceReferenceExists(testingRegistryON);
+    }
 }
index 5ce6d467995f54be7a0644da658741d163515931..2f50513345c2b3ef727a8c027a1206ed78fe8bd9 100644 (file)
@@ -7,24 +7,39 @@
  */
 package org.opendaylight.controller.config.util;
 
-import com.google.common.collect.Sets;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+
+import java.lang.management.ManagementFactory;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Set;
+
+import javax.management.Attribute;
+import javax.management.MBeanException;
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
+import org.opendaylight.controller.config.api.ValidationException;
+import org.opendaylight.controller.config.api.ValidationException.ExceptionMessageWithStackTrace;
 import org.opendaylight.controller.config.api.jmx.ObjectNameUtil;
 
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-import java.lang.management.ManagementFactory;
-import java.util.Set;
-
-import static org.junit.Assert.assertEquals;
+import com.google.common.collect.Sets;
 
 public class ConfigTransactionClientsTest {
     private final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
     private TestingConfigTransactionController transactionController;
     private ObjectName transactionControllerON;
     private ConfigTransactionClient jmxTransactionClient;
+    Attribute attr;
+
 
     @Before
     public void setUp() throws Exception {
@@ -32,7 +47,8 @@ public class ConfigTransactionClientsTest {
         transactionControllerON = new ObjectName(ObjectNameUtil.ON_DOMAIN + ":"
                 + ObjectNameUtil.TYPE_KEY + "=TransactionController");
         mbs.registerMBean(transactionController, transactionControllerON);
-        jmxTransactionClient = new ConfigTransactionJMXClient(null, transactionControllerON,
+        jmxTransactionClient = new ConfigTransactionJMXClient(null,
+                transactionControllerON,
                 ManagementFactory.getPlatformMBeanServer());
     }
 
@@ -47,7 +63,8 @@ public class ConfigTransactionClientsTest {
     public void testLookupConfigBeans() throws Exception {
         Set<ObjectName> jmxLookup = testClientLookupConfigBeans(jmxTransactionClient);
         assertEquals(Sets.newHashSet(transactionController.conf1,
-                transactionController.conf2, transactionController.conf3), jmxLookup);
+                transactionController.conf2, transactionController.conf3),
+                jmxLookup);
     }
 
     private Set<ObjectName> testClientLookupConfigBeans(
@@ -59,4 +76,247 @@ public class ConfigTransactionClientsTest {
         assertEquals(3, beans.size());
         return beans;
     }
+
+    @Test
+    public void testGetObjectName() throws Exception {
+        final boolean matchesControllerON = testClientGetObjectName(jmxTransactionClient);
+        assertEquals(true, matchesControllerON);
+    }
+
+    private boolean testClientGetObjectName(ConfigTransactionClient client) {
+        return transactionControllerON.equals(client.getObjectName());
+    }
+
+    @Test
+    public void testGetAvailableModuleNames() throws Exception {
+        Set<String> jmxMN = testClientGetAvailableModuleNames(jmxTransactionClient);
+        assertNull(jmxMN);
+    }
+
+    private Set<String> testClientGetAvailableModuleNames(
+            ConfigTransactionClient client) {
+        return client.getAvailableModuleNames();
+    }
+
+    @Test
+    public void testGetTransactionName() throws Exception {
+        String jmxTN = testClientGetTransactionName(jmxTransactionClient);
+        assertEquals("transactionName", jmxTN);
+    }
+
+    private String testClientGetTransactionName(ConfigTransactionClient client) {
+        return client.getTransactionName();
+    }
+
+    @Ignore
+    public void testGetVersion() throws Exception {
+        long jmxVersion = jmxTransactionClient.getVersion();
+        assertNull(jmxVersion);
+    }
+
+    @Ignore
+    public void testGetParentVersion() throws Exception {
+        long jmxParentVersion = jmxTransactionClient.getParentVersion();
+        assertNull(jmxParentVersion);
+    }
+
+    @Test
+    public void testValidateConfig() throws Exception {
+        jmxTransactionClient.validateConfig();
+    }
+
+    @Test
+    public void testAbortConfig() throws Exception {
+        jmxTransactionClient.abortConfig();
+    }
+
+    @Test
+    public void testDestroyModule2() throws Exception {
+        jmxTransactionClient.destroyModule("moduleB", "instB");
+        assertNull(transactionController.conf4);
+    }
+
+    @Test
+    public void testDestroyModule() throws Exception {
+        ObjectName on = testClientCreateModule(jmxTransactionClient);
+        jmxTransactionClient.destroyModule(on);
+    }
+
+    @Test
+    public void testCreateModule() throws Exception {
+        ObjectName on = testClientCreateModule(jmxTransactionClient);
+        Assert.assertNotNull(on);
+    }
+
+    private ObjectName testClientCreateModule(ConfigTransactionClient client)
+            throws Exception {
+        return client.createModule("testModuleName", "testInstanceName");
+    }
+
+    @Ignore
+    public void testAssertVersion() {
+        jmxTransactionClient.assertVersion((int) jmxTransactionClient.getParentVersion(),
+                (int) jmxTransactionClient.getVersion());
+    }
+
+    @Test(expected = NullPointerException.class)
+    public void testCommit() throws Exception {
+        jmxTransactionClient.commit();
+    }
+
+    @Test
+    public void testLookupConfigBeans2() throws Exception {
+        Set<ObjectName> jmxLookup = testClientLookupConfigBeans2(
+                jmxTransactionClient, "moduleB");
+        assertEquals(Sets.newHashSet(transactionController.conf3), jmxLookup);
+    }
+
+    private Set<ObjectName> testClientLookupConfigBeans2(
+            ConfigTransactionClient client, String moduleName) {
+        Set<ObjectName> beans = client.lookupConfigBeans(moduleName);
+        assertEquals(1, beans.size());
+        return beans;
+    }
+
+    @Test
+    public void testLookupConfigBean() throws Exception {
+        Set<ObjectName> jmxLookup = testClientLookupConfigBean(
+                jmxTransactionClient, "moduleB", "instB");
+        assertEquals(Sets.newHashSet(transactionController.conf3), jmxLookup);
+    }
+
+    private Set<ObjectName> testClientLookupConfigBean(
+            ConfigTransactionClient client, String moduleName,
+            String instanceName) {
+        Set<ObjectName> beans = client.lookupConfigBeans(moduleName,
+                instanceName);
+        assertEquals(1, beans.size());
+        return beans;
+    }
+
+    @Test
+    public void testLookupConfigBeans3() throws Exception {
+        Set<ObjectName> jmxLookup = testClientLookupConfigBeans3(
+                jmxTransactionClient, "moduleB", "instB");
+        assertEquals(Sets.newHashSet(transactionController.conf3), jmxLookup);
+    }
+
+    private Set<ObjectName> testClientLookupConfigBeans3(
+            ConfigTransactionClient client, String moduleName,
+            String instanceName) {
+        Set<ObjectName> beans = client.lookupConfigBeans(moduleName,
+                instanceName);
+        assertEquals(1, beans.size());
+        return beans;
+    }
+
+    @Test
+    public void testCheckConfigBeanExists() throws Exception {
+        jmxTransactionClient.checkConfigBeanExists(transactionControllerON);
+        assertEquals("configBeanExists", transactionController.check);
+    }
+
+    @Test
+    public void testSaveServiceReference() throws Exception {
+        assertEquals(transactionControllerON, jmxTransactionClient.saveServiceReference("serviceInterfaceName", "refName", transactionControllerON));
+    }
+
+    @Test
+    public void testRemoveServiceReference() throws Exception {
+        jmxTransactionClient.removeServiceReference("serviceInterface", "refName");
+        assertEquals("refName", transactionController.check);
+    }
+
+    @Test
+    public void testRemoveAllServiceReferences() throws Exception {
+        jmxTransactionClient.removeAllServiceReferences();
+        assertNull(transactionController.check);
+    }
+
+    @Test
+    public void testLookupConfigBeanByServiceInterfaceName() throws Exception {
+        assertEquals(transactionController.conf3, jmxTransactionClient.lookupConfigBeanByServiceInterfaceName("serviceInterface", "refName"));
+    }
+
+    @Test
+    public void testGetServiceMapping() throws Exception {
+        Assert.assertNotNull(jmxTransactionClient.getServiceMapping());
+    }
+
+    @Test
+    public void testLookupServiceReferencesByServiceInterfaceName() throws Exception {
+        Assert.assertNotNull(jmxTransactionClient.lookupServiceReferencesByServiceInterfaceName("serviceInterfaceQName"));
+    }
+
+    @Test
+    public void testLookupServiceInterfaceNames() throws Exception {
+        assertEquals(Sets.newHashSet("setA"), jmxTransactionClient.lookupServiceInterfaceNames(transactionControllerON));
+    }
+
+    @Test
+    public void testGetServiceInterfaceName() throws Exception {
+        assertEquals("namespace" + "localName", jmxTransactionClient.getServiceInterfaceName("namespace", "localName"));
+    }
+
+    @Test
+    public void removeServiceReferences() throws Exception {
+        assertEquals(true, jmxTransactionClient.removeServiceReferences(transactionControllerON));
+    }
+
+    @Test
+    public void testGetServiceReference() throws Exception {
+        assertEquals(transactionController.conf3, jmxTransactionClient.getServiceReference("serviceInterfaceQName", "refName"));
+    }
+
+    @Test
+    public void testCheckServiceReferenceExists() throws Exception {
+        jmxTransactionClient.checkServiceReferenceExists(transactionControllerON);
+        assertEquals("referenceExist", transactionController.check);
+    }
+
+    @Test(expected = RuntimeException.class)
+    public void testValidateBean() throws Exception {
+        jmxTransactionClient.validateBean(transactionControllerON);
+    }
+
+    @Test(expected = ValidationException.class)
+    public void testValidateBean2() throws Exception {
+        MBeanServer mbsLocal = mock(MBeanServer.class);
+        MBeanException mBeanException = new MBeanException(new ValidationException(
+                Collections.<String, Map<String, ExceptionMessageWithStackTrace>>emptyMap()));
+        doThrow(mBeanException).when(mbsLocal).invoke(transactionControllerON, "validate", null, null);
+
+        ConfigTransactionJMXClient jmxTransactionClientFake = new ConfigTransactionJMXClient(null,
+                transactionControllerON,
+                mbsLocal);
+        jmxTransactionClientFake.validateBean(transactionControllerON);
+    }
+
+    @Test(expected = RuntimeException.class)
+    public void testValidateBean3() throws Exception {
+        MBeanServer mbsLocal = mock(MBeanServer.class);
+        MBeanException mBeanException = new MBeanException(new RuntimeException());
+        doThrow(mBeanException).when(mbsLocal).invoke(transactionControllerON, "validate", null, null);
+        ConfigTransactionJMXClient jmxTransactionClientFake = new ConfigTransactionJMXClient(null,
+                transactionControllerON,
+                mbsLocal);
+        jmxTransactionClientFake.validateBean(transactionControllerON);
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testSetAttribute() throws Exception {
+        attr = null;
+        jmxTransactionClient.setAttribute(transactionControllerON, "attrName", attr);
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testGetAttribute() throws Exception {
+        attr = jmxTransactionClient.getAttribute(transactionController.conf3, "attrName");
+        assertNull(attr);
+    }
+
+    @Test
+    public void testGetAvailableModuleFactoryQNames() throws Exception {
+        Assert.assertNotNull(jmxTransactionClient.getAvailableModuleFactoryQNames());
+    }
 }
index e0d4c8594375c58739a46c1c27760c1a1c9860a1..ab6cda935bd5976fc2c1f446d82bc68393c41d35 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.config.util;
 
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -26,20 +27,31 @@ import com.google.common.collect.Sets;
 public class TestingConfigRegistry implements ConfigRegistryMXBean {
 
     static final ObjectName conf1, conf2, conf3, run1, run2, run3;
+    public static String check;
+    public static boolean checkBool;
+    private Map<String, ObjectName> map = new HashMap<>();
 
     public static final String moduleName1 = "moduleA";
     public static final String moduleName2 = "moduleB";
     public static final String instName1 = "instA";
     public static final String instName2 = "instB";
+    public static final String refName1 = "refA";
+    public static final String refName2 = "refB";
+    public static final String serviceQName1 = "qnameA";
+    public static final String serviceQName2 = "qnameB";
 
     static {
         conf1 = ObjectNameUtil.createON(ObjectNameUtil.ON_DOMAIN
                 + ":type=Module," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
-                + "=" + moduleName1);
+                + "=" + moduleName1 + "," + ObjectNameUtil.SERVICE_QNAME_KEY
+                + "=" + serviceQName1 + "," + ObjectNameUtil.REF_NAME_KEY
+                + "=" + refName1);
         conf2 = ObjectNameUtil.createON(ObjectNameUtil.ON_DOMAIN
                 + ":type=Module," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
                 + "=" + moduleName1 + "," + ObjectNameUtil.INSTANCE_NAME_KEY
-                + "=" + instName1);
+                + "=" + instName1 + "," + ObjectNameUtil.SERVICE_QNAME_KEY
+                + "=" + serviceQName2 + "," + ObjectNameUtil.REF_NAME_KEY
+                + "=" + refName1);
         conf3 = ObjectNameUtil.createON(ObjectNameUtil.ON_DOMAIN
                 + ":type=Module," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
                 + "=" + moduleName2 + "," + ObjectNameUtil.INSTANCE_NAME_KEY
@@ -55,11 +67,15 @@ public class TestingConfigRegistry implements ConfigRegistryMXBean {
                 + ":type=RuntimeBean," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
                 + "=" + moduleName2 + "," + ObjectNameUtil.INSTANCE_NAME_KEY
                 + "=" + instName2);
+
+        check = null;
+        checkBool = false;
+
     }
 
     @Override
     public ObjectName beginConfig() {
-        return null;
+        return conf2;
     }
 
     @Override
@@ -146,42 +162,60 @@ public class TestingConfigRegistry implements ConfigRegistryMXBean {
 
     @Override
     public void checkConfigBeanExists(ObjectName objectName) throws InstanceNotFoundException {
-        throw new UnsupportedOperationException();
+        Set<ObjectName> configBeans = Sets.<ObjectName> newHashSet(run1, run2, run3);
+        if (!configBeans.isEmpty()) {
+            checkBool = true;
+        }
     }
 
     @Override
     public ObjectName lookupConfigBeanByServiceInterfaceName(String serviceInterfaceQName, String refName) {
-        throw new UnsupportedOperationException();
+        if (serviceInterfaceQName.equals(serviceQName1) && refName.equals(refName1)) {
+            return conf1;
+        }
+        else {
+            return null;
+        }
     }
 
     @Override
     public Map<String, Map<String, ObjectName>> getServiceMapping() {
-        throw new UnsupportedOperationException();
+        return null;
     }
 
     @Override
     public Map<String, ObjectName> lookupServiceReferencesByServiceInterfaceName(String serviceInterfaceQName) {
-        throw new UnsupportedOperationException();
+
+        if (serviceInterfaceQName.equals(serviceQName1)) {
+            map.put("conf1", conf1);
+        }
+        else if (serviceInterfaceQName.equals(serviceQName2)) {
+            map.put("conf2", conf2);
+        }
+        else {
+            map.put("conf3", conf3);
+        }
+        return map;
     }
 
     @Override
     public Set<String> lookupServiceInterfaceNames(ObjectName objectName) throws InstanceNotFoundException {
-        throw new UnsupportedOperationException();
+        return Sets.<String> newHashSet(serviceQName1, serviceQName2);
     }
 
     @Override
     public String getServiceInterfaceName(String namespace, String localName) {
-        throw new UnsupportedOperationException();
+        return null;
     }
 
     @Override
     public Set<String> getAvailableModuleFactoryQNames() {
-        throw new UnsupportedOperationException();
+        return Sets.<String> newHashSet(moduleName1, moduleName2);
     }
 
     @Override
     public ObjectName getServiceReference(String serviceInterfaceQName, String refName) throws InstanceNotFoundException {
-        throw new UnsupportedOperationException();
+        return conf1;
     }
 
     @Override
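Note: as with the transaction stub further below, TestingConfigRegistry now answers several operations with canned fixtures (conf1/conf2, the qnameA/qnameB service names, both module names) instead of throwing UnsupportedOperationException, and records the side effect of checkConfigBeanExists in the static checkBool flag so that ConfigRegistryClientsTest can assert on it.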
index 4d16f51ae5e8e3068a32a0315dadc82ee16c6f42..ee1e61967db9b7180f232de50fdd277cc0293009 100644 (file)
@@ -7,6 +7,7 @@
  */
 package org.opendaylight.controller.config.util;
 
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 
@@ -24,6 +25,10 @@ public class TestingConfigTransactionController implements
         ConfigTransactionControllerMXBean {
 
     public final ObjectName conf1, conf2, conf3;
+    public ObjectName conf4;
+    public String check;
+    Map<String, ObjectName> mapSub;
+    Map<String, Map<String, ObjectName>> map;
 
     public static final String moduleName1 = "moduleA";
     public static final String moduleName2 = "moduleB";
@@ -42,17 +47,29 @@ public class TestingConfigTransactionController implements
                 + ":type=Module," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
                 + "=" + moduleName2 + "," + ObjectNameUtil.INSTANCE_NAME_KEY
                 + "=" + instName2);
+        conf4 = ObjectNameUtil.createON(ObjectNameUtil.ON_DOMAIN
+                + ":type=Module," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
+                + "=" + moduleName2 + "," + ObjectNameUtil.INSTANCE_NAME_KEY
+                + "=" + instName2);
+        mapSub = new HashMap<>();
+        map = new HashMap<>();
     }
 
     @Override
     public ObjectName createModule(String moduleName, String instanceName)
             throws InstanceAlreadyExistsException {
-        return null;
+        return ObjectNameUtil.createON(ObjectNameUtil.ON_DOMAIN
+                + ":type=Module," + ObjectNameUtil.MODULE_FACTORY_NAME_KEY
+                + "=" + moduleName);
     }
 
     @Override
     public void destroyModule(ObjectName objectName)
             throws InstanceNotFoundException {
+        if (objectName != null) {
+            conf4 = null;
+        }
     }
 
     @Override
@@ -65,7 +82,8 @@ public class TestingConfigTransactionController implements
 
     @Override
     public String getTransactionName() {
-        return null;
+        return "transactionName";
     }
 
     @Override
@@ -113,66 +131,69 @@ public class TestingConfigTransactionController implements
 
     @Override
     public void checkConfigBeanExists(ObjectName objectName) throws InstanceNotFoundException {
-        throw new UnsupportedOperationException();
+        check = "configBeanExists";
     }
 
     @Override
     public ObjectName saveServiceReference(String serviceInterfaceName, String refName, ObjectName moduleON) throws InstanceNotFoundException {
-        throw new UnsupportedOperationException();
+        return moduleON;
     }
 
     @Override
     public void removeServiceReference(String serviceInterfaceName, String refName) {
-        throw new UnsupportedOperationException();
+        check = refName;
     }
 
     @Override
     public void removeAllServiceReferences() {
-        throw new UnsupportedOperationException();
+        check = null;
     }
 
     @Override
     public ObjectName lookupConfigBeanByServiceInterfaceName(String serviceInterfaceQName, String refName) {
-        throw new UnsupportedOperationException();
+        return conf3;
     }
 
     @Override
     public Map<String, Map<String, ObjectName>> getServiceMapping() {
-        throw new UnsupportedOperationException();
+        mapSub.put("A",conf2);
+        map.put("AA", mapSub);
+        return map;
     }
 
     @Override
     public Map<String, ObjectName> lookupServiceReferencesByServiceInterfaceName(String serviceInterfaceQName) {
-        throw new UnsupportedOperationException();
+        mapSub.put("A",conf2);
+        return mapSub;
     }
 
     @Override
     public Set<String> lookupServiceInterfaceNames(ObjectName objectName) throws InstanceNotFoundException {
-        throw new UnsupportedOperationException();
+        return Sets.newHashSet("setA");
     }
 
     @Override
     public String getServiceInterfaceName(String namespace, String localName) {
-        throw new UnsupportedOperationException();
+        return check = namespace + localName;
     }
 
     @Override
     public boolean removeServiceReferences(ObjectName objectName) throws InstanceNotFoundException {
-        throw new UnsupportedOperationException();
+        return true;
     }
 
     @Override
     public Set<String> getAvailableModuleFactoryQNames() {
-        throw new UnsupportedOperationException();
+        return Sets.newHashSet("availableModuleFactoryQNames");
     }
 
     @Override
     public ObjectName getServiceReference(String serviceInterfaceQName, String refName) throws InstanceNotFoundException {
-        throw new UnsupportedOperationException();
+        return conf3;
     }
 
     @Override
     public void checkServiceReferenceExists(ObjectName objectName) throws InstanceNotFoundException {
-        throw new UnsupportedOperationException();
+        check = "referenceExist";
     }
 }
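Note: the stub transaction controller follows the same pattern: it records its most recent operation in the public check field (and clears conf4 on destroyModule), and returns fixed fixtures such as conf3, the singleton "setA" set and a one-entry nested service mapping, which is exactly what ConfigTransactionClientsTest asserts against.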
diff --git a/opendaylight/distribution/opendaylight-karaf-resources/src/main/resources/bin/setenv b/opendaylight/distribution/opendaylight-karaf-resources/src/main/resources/bin/setenv
new file mode 100755 (executable)
index 0000000..947c65f
--- /dev/null
@@ -0,0 +1,55 @@
+#!/bin/sh
+#
+#    Licensed to the Apache Software Foundation (ASF) under one or more
+#    contributor license agreements.  See the NOTICE file distributed with
+#    this work for additional information regarding copyright ownership.
+#    The ASF licenses this file to You under the Apache License, Version 2.0
+#    (the "License"); you may not use this file except in compliance with
+#    the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+#
+
+#
+# handle specific scripts; the SCRIPT_NAME is exactly the name of the Karaf
+# script; for example karaf, start, stop, admin, client, ...
+#
+# if [ "$KARAF_SCRIPT" == "SCRIPT_NAME" ]; then
+#   Actions go here...
+# fi
+
+#
+# general settings which should be applied for all scripts go here; please keep
+# in mind that it is possible that scripts might be executed more than once, e.g.
+# in example of the start script where the start script is executed first and the
+# karaf script afterwards.
+#
+
+#
+# The following section shows the possible configuration options for the default 
+# karaf scripts
+#
+# export JAVA_HOME # Location of Java installation
+# export JAVA_MIN_MEM # Minimum memory for the JVM
+# export JAVA_MAX_MEM # Maximum memory for the JVM
+# export JAVA_PERM_MEM # Minimum perm memory for the JVM
+# export JAVA_MAX_PERM_MEM # Maximum perm memory for the JVM
+# export KARAF_HOME # Karaf home folder
+# export KARAF_DATA # Karaf data folder
+# export KARAF_BASE # Karaf base folder
+# export KARAF_ETC  # Karaf etc  folder
+# export KARAF_OPTS # Additional available Karaf options
+# export KARAF_DEBUG # Enable debug mode
+if [ "x$JAVA_MAX_PERM_MEM" = "x" ]; then
+    export JAVA_MAX_PERM_MEM="512m"
+fi
+if [ "x$JAVA_MAX_MEM" = "x" ]; then
+    export JAVA_MAX_MEM="2048m"
+fi
+
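Note: because each guard above only exports a value when the variable is still unset, the 512m permanent-generation and 2048m heap figures act as distribution defaults; an operator can still override them by exporting JAVA_MAX_PERM_MEM or JAVA_MAX_MEM in the environment before invoking the Karaf scripts (an assumed deployment practice, not something mandated by this change).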
index 8a2aa59dfe59d2e9fb9ec9f0ab27abb6f89df11e..cdb65420135d1f71ebe1e66eb67dec8efda762dd 100644 (file)
@@ -119,7 +119,7 @@ controllerTrustStorePassword=
 enableStrongPasswordCheck = false
 
 #Jolokia configurations
-org.jolokia.listenForHttpService=false
+#org.jolokia.listenForHttpService=false
 
 # Logging configuration for Tomcat-JUL logging
 java.util.logging.config.file=configuration/tomcat-logging.properties
@@ -127,3 +127,9 @@ java.util.logging.config.file=configuration/tomcat-logging.properties
 #Hosttracker hostsdb key scheme setting
 hosttracker.keyscheme=IP
 
+# LISP Flow Mapping configuration
+# Map-Register messages overwrite existing RLOC sets in EID-to-RLOC mappings
+lisp.mappingOverwrite = true
+# Enable the Solicit-Map-Request (SMR) mechanism
+lisp.smr = false
+
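Note: the two LISP Flow Mapping keys are introduced here with their default values; per the comments, setting lisp.mappingOverwrite to false would presumably keep existing RLOC sets instead of overwriting them on Map-Register, and setting lisp.smr to true would enable the Solicit-Map-Request mechanism. Only these boolean defaults are shown in this change.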
diff --git a/opendaylight/md-sal/benchmark-data-store/pom.xml b/opendaylight/md-sal/benchmark-data-store/pom.xml
new file mode 100644 (file)
index 0000000..1af2287
--- /dev/null
@@ -0,0 +1,72 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+
+This program and the accompanying materials are made available under the
+terms of the Eclipse Public License v1.0 which accompanies this distribution,
+and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>sal-parent</artifactId>
+    <groupId>org.opendaylight.controller</groupId>
+    <version>1.1-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+
+  <groupId>org.opendaylight.controller</groupId>
+  <artifactId>benchmark-data-store</artifactId>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-parser-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.openjdk.jmh</groupId>
+      <artifactId>jmh-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.openjdk.jmh</groupId>
+      <artifactId>jmh-generator-annprocess</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-inmemory-datastore</artifactId>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <configuration>
+          <classpathScope>test</classpathScope>
+          <executable>java</executable>
+          <arguments>
+            <argument>-classpath</argument>
+            <classpath/>
+            <argument>org.openjdk.jmh.Main</argument>
+            <argument>.*</argument>
+          </arguments>
+        </configuration>
+        <executions>
+          <execution>
+            <id>run-benchmarks</id>
+            <phase>integration-test</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
\ No newline at end of file
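Note: the exec-maven-plugin binding above launches org.openjdk.jmh.Main during the integration-test phase with the include pattern ".*", so a plain "mvn integration-test" run in this module should execute every benchmark in the package; narrowing that argument to a more specific regular expression (a standard JMH option) is the usual way to run a single benchmark class.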
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryDatastoreWriteTransactionBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/AbstractInMemoryDatastoreWriteTransactionBenchmark.java
new file mode 100644 (file)
index 0000000..aa5ef61
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.api.CollectionNodeBuilder;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Warmup;
+
+/**
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+public abstract class AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+    private static final int WARMUP_ITERATIONS = 20;
+    private static final int MEASUREMENT_ITERATIONS = 20;
+
+    private static final int OUTER_LIST_100K = 100000;
+    private static final int OUTER_LIST_50K = 50000;
+    private static final int OUTER_LIST_10K = 10000;
+
+    private static final YangInstanceIdentifier[] OUTER_LIST_100K_PATHS = initOuterListPaths(OUTER_LIST_100K);
+    private static final YangInstanceIdentifier[] OUTER_LIST_50K_PATHS = initOuterListPaths(OUTER_LIST_50K);
+    private static final YangInstanceIdentifier[] OUTER_LIST_10K_PATHS = initOuterListPaths(OUTER_LIST_10K);
+
+    private static YangInstanceIdentifier[] initOuterListPaths(final int outerListPathsCount) {
+        final YangInstanceIdentifier[] paths = new YangInstanceIdentifier[outerListPathsCount];
+
+        for (int outerListKey = 0; outerListKey < outerListPathsCount; ++outerListKey) {
+            paths[outerListKey] = YangInstanceIdentifier.builder(BenchmarkModel.OUTER_LIST_PATH)
+                .nodeWithKey(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
+                .build();
+        }
+        return paths;
+    }
+
+    private static final MapNode ONE_ITEM_INNER_LIST = initInnerListItems(1);
+    private static final MapNode TWO_ITEM_INNER_LIST = initInnerListItems(2);
+    private static final MapNode TEN_ITEM_INNER_LIST = initInnerListItems(10);
+
+    private static MapNode initInnerListItems(final int count) {
+        final CollectionNodeBuilder<MapEntryNode, MapNode> mapEntryBuilder = ImmutableNodes
+            .mapNodeBuilder(BenchmarkModel.INNER_LIST_QNAME);
+
+        for (int i = 1; i <= count; ++i) {
+            mapEntryBuilder
+                .withChild(ImmutableNodes.mapEntry(BenchmarkModel.INNER_LIST_QNAME, BenchmarkModel.NAME_QNAME, i));
+        }
+        return mapEntryBuilder.build();
+    }
+
+    private static final NormalizedNode<?, ?>[] OUTER_LIST_ONE_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_100K, ONE_ITEM_INNER_LIST);
+    private static final NormalizedNode<?, ?>[] OUTER_LIST_TWO_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_50K, TWO_ITEM_INNER_LIST);
+    private static final NormalizedNode<?, ?>[] OUTER_LIST_TEN_ITEM_INNER_LIST = initOuterListItems(OUTER_LIST_10K, TEN_ITEM_INNER_LIST);
+
+    private static NormalizedNode<?,?>[] initOuterListItems(int outerListItemsCount, MapNode innerList) {
+        final NormalizedNode<?,?>[] outerListItems = new NormalizedNode[outerListItemsCount];
+
+        for (int i = 0; i < outerListItemsCount; ++i) {
+            int outerListKey = i;
+            outerListItems[i] = ImmutableNodes.mapEntryBuilder(BenchmarkModel.OUTER_LIST_QNAME, BenchmarkModel.ID_QNAME, outerListKey)
+                .withChild(innerList).build();
+        }
+        return outerListItems;
+    }
+
+    protected SchemaContext schemaContext;
+    protected InMemoryDOMDataStore domStore;
+
+    abstract public void setUp() throws Exception;
+
+    abstract public void tearDown();
+
+    protected void initTestNode() throws Exception {
+        final YangInstanceIdentifier testPath = YangInstanceIdentifier.builder(BenchmarkModel.TEST_PATH)
+            .build();
+        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+        writeTx.write(testPath, provideOuterListNode());
+
+        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+        cohort.canCommit().get();
+        cohort.preCommit().get();
+        cohort.commit().get();
+    }
+
+    private DataContainerChild<?, ?> provideOuterListNode() {
+        return ImmutableContainerNodeBuilder
+            .create()
+            .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(BenchmarkModel.TEST_QNAME))
+            .withChild(
+                ImmutableNodes.mapNodeBuilder(BenchmarkModel.OUTER_LIST_QNAME)
+                    .build()).build();
+    }
+
+    @Benchmark
+    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    public void write100KSingleNodeWithOneInnerItemInOneCommitBenchmark() throws Exception {
+        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+        for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
+            writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
+        }
+        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+        cohort.canCommit().get();
+        cohort.preCommit().get();
+        cohort.commit().get();
+    }
+
+    @Benchmark
+    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    public void write100KSingleNodeWithOneInnerItemInCommitPerWriteBenchmark() throws Exception {
+        for (int outerListKey = 0; outerListKey < OUTER_LIST_100K; ++outerListKey) {
+            DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+            writeTx.write(OUTER_LIST_100K_PATHS[outerListKey], OUTER_LIST_ONE_ITEM_INNER_LIST[outerListKey]);
+
+            DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+            cohort.canCommit().get();
+            cohort.preCommit().get();
+            cohort.commit().get();
+        }
+    }
+
+    @Benchmark
+    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    public void write50KSingleNodeWithTwoInnerItemsInOneCommitBenchmark() throws Exception {
+        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+        for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
+            writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
+        }
+        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+        cohort.canCommit().get();
+        cohort.preCommit().get();
+        cohort.commit().get();
+    }
+
+    @Benchmark
+    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    public void write50KSingleNodeWithTwoInnerItemsInCommitPerWriteBenchmark() throws Exception {
+        for (int outerListKey = 0; outerListKey < OUTER_LIST_50K; ++outerListKey) {
+            DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+            writeTx.write(OUTER_LIST_50K_PATHS[outerListKey], OUTER_LIST_TWO_ITEM_INNER_LIST[outerListKey]);
+            DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+            cohort.canCommit().get();
+            cohort.preCommit().get();
+            cohort.commit().get();
+        }
+    }
+
+    @Benchmark
+    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    public void write10KSingleNodeWithTenInnerItemsInOneCommitBenchmark() throws Exception {
+        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+        for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
+            writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
+        }
+        DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+        cohort.canCommit().get();
+        cohort.preCommit().get();
+        cohort.commit().get();
+    }
+
+    @Benchmark
+    @Warmup(iterations = WARMUP_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    @Measurement(iterations = MEASUREMENT_ITERATIONS, timeUnit = TimeUnit.MILLISECONDS)
+    public void write10KSingleNodeWithTenInnerItemsInCommitPerWriteBenchmark() throws Exception {
+        for (int outerListKey = 0; outerListKey < OUTER_LIST_10K; ++outerListKey) {
+            DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+            writeTx.write(OUTER_LIST_10K_PATHS[outerListKey], OUTER_LIST_TEN_ITEM_INNER_LIST[outerListKey]);
+            DOMStoreThreePhaseCommitCohort cohort = writeTx.ready();
+            cohort.canCommit().get();
+            cohort.preCommit().get();
+            cohort.commit().get();
+        }
+    }
+}
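Note: every benchmark method above drives the full three-phase commit protocol: it writes through a DOMStoreReadWriteTransaction and then calls canCommit(), preCommit() and commit() on the cohort returned by ready(), so the measured times include commit overhead rather than just the cost of the write() calls.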
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/BenchmarkModel.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/BenchmarkModel.java
new file mode 100644 (file)
index 0000000..024385b
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.Set;
+
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+
+/**
+ * BenchmarkModel loads the odl-datastore-test.yang model from the test resources.
+ * <br>
+ * It acts as a facilitator class, holding several references into the initialized YANG model
+ * as static final members.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+public final class BenchmarkModel {
+
+    public static final QName TEST_QNAME = QName
+        .create("urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test", "2014-03-13","test");
+    public static final QName OUTER_LIST_QNAME = QName.create(TEST_QNAME, "outer-list");
+    public static final QName INNER_LIST_QNAME = QName.create(TEST_QNAME, "inner-list");
+    public static final QName ID_QNAME = QName.create(TEST_QNAME, "id");
+    public static final QName NAME_QNAME = QName.create(TEST_QNAME, "name");
+    private static final String DATASTORE_TEST_YANG = "/odl-datastore-test.yang";
+
+    public static final YangInstanceIdentifier TEST_PATH = YangInstanceIdentifier.of(TEST_QNAME);
+    public static final YangInstanceIdentifier OUTER_LIST_PATH = YangInstanceIdentifier.builder(TEST_PATH).node(OUTER_LIST_QNAME).build();
+
+    public static final InputStream getDatastoreBenchmarkInputStream() {
+        return getInputStream(DATASTORE_TEST_YANG);
+    }
+
+    private static InputStream getInputStream(final String resourceName) {
+        return BenchmarkModel.class.getResourceAsStream(resourceName);
+    }
+
+    public static SchemaContext createTestContext() {
+        YangParserImpl parser = new YangParserImpl();
+        Set<Module> modules = parser.parseYangModelsFromStreams(Collections.singletonList(
+            getDatastoreBenchmarkInputStream()));
+        return parser.resolveSchemaContext(modules);
+    }
+}
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithExecutorServiceBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithExecutorServiceBenchmark.java
new file mode 100644 (file)
index 0000000..77a4966
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+
+/**
+ * Benchmark measuring the performance of write operations against InMemoryDOMDataStore. This
+ * variant creates the store with a blocking bounded fast thread pool as the Data Change Listener
+ * executor service and a bounded single-thread executor as the DOM Store executor service.
+ *
+ * @see org.opendaylight.yangtools.util.concurrent.SpecialExecutors
+ * @see org.opendaylight.controller.md.sal.dom.store.benchmark.AbstractInMemoryDatastoreWriteTransactionBenchmark
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWithExecutorServiceBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark  {
+
+    private static final int MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20;
+    private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
+    private static final int MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE = 5000;
+
+    @Override
+    @Setup(Level.Trial)
+    public void setUp() throws Exception {
+        final String name = "DS_BENCHMARK";
+        final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
+            MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL");
+
+        final ListeningExecutorService domStoreExecutor = MoreExecutors.listeningDecorator(SpecialExecutors.newBoundedSingleThreadExecutor(
+            MAX_DATA_STORE_EXECUTOR_QUEUE_SIZE, "DOMStore-" + name ));
+
+        domStore = new InMemoryDOMDataStore(name, domStoreExecutor,
+            dataChangeListenerExecutor);
+        schemaContext = BenchmarkModel.createTestContext();
+        domStore.onGlobalContextUpdated(schemaContext);
+        initTestNode();
+    }
+
+    @Override
+    @TearDown
+    public void tearDown() {
+        schemaContext = null;
+        domStore = null;
+    }
+}
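Note: this variant differs from the two benchmark classes that follow only in the executors handed to InMemoryDOMDataStore: here the data change listener executor is a blocking bounded fast thread pool and the DOM store executor a bounded single-thread executor (both built via SpecialExecutors), whereas the following classes substitute MoreExecutors.sameThreadExecutor() for the DOM store executor and, in the last variant, for the listener executor as well.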
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithSameThreadedExecutorBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWithSameThreadedExecutorBenchmark.java
new file mode 100644 (file)
index 0000000..6a0cecc
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+
+/**
+ * Benchmark measuring the performance of write operations against InMemoryDOMDataStore. This
+ * variant creates the store with a blocking bounded fast thread pool as the Data Change Listener
+ * executor service and a same-thread executor as the DOM Store executor service.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWithSameThreadedExecutorBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+    private static final int MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE = 20;
+    private static final int MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE = 1000;
+
+    @Setup(Level.Trial)
+    public void setUp() throws Exception {
+        final String name = "DS_BENCHMARK";
+        final ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
+            MAX_DATA_CHANGE_EXECUTOR_POOL_SIZE, MAX_DATA_CHANGE_EXECUTOR_QUEUE_SIZE, name + "-DCL");
+
+        domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", MoreExecutors.sameThreadExecutor(),
+            dataChangeListenerExecutor);
+        schemaContext = BenchmarkModel.createTestContext();
+        domStore.onGlobalContextUpdated(schemaContext);
+        initTestNode();
+    }
+
+    @TearDown
+    public void tearDown() {
+        schemaContext = null;
+        domStore = null;
+    }
+}
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWriteTransactionBenchmark.java b/opendaylight/md-sal/benchmark-data-store/src/main/java/org/opendaylight/controller/md/sal/dom/store/benchmark/InMemoryDataStoreWriteTransactionBenchmark.java
new file mode 100644 (file)
index 0000000..d3dda96
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.dom.store.benchmark;
+
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.TimeUnit;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.TearDown;
+
+/**
+ * Benchmark measuring the performance of write operations against InMemoryDOMDataStore. This
+ * variant creates the store with a same-thread executor both as the Data Change Listener executor
+ * service and as the DOM Store executor service.
+ *
+ * @author Lukas Sedlak <lsedlak@cisco.com>
+ */
+@State(Scope.Thread)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Fork(1)
+public class InMemoryDataStoreWriteTransactionBenchmark extends AbstractInMemoryDatastoreWriteTransactionBenchmark {
+
+    @Setup(Level.Trial)
+    public void setUp() throws Exception {
+        domStore = new InMemoryDOMDataStore("SINGLE_THREADED_DS_BENCHMARK", MoreExecutors.sameThreadExecutor(),
+            MoreExecutors.sameThreadExecutor());
+        schemaContext = BenchmarkModel.createTestContext();
+        domStore.onGlobalContextUpdated(schemaContext);
+        initTestNode();
+    }
+
+    @TearDown
+    public void tearDown() {
+        schemaContext = null;
+        domStore = null;
+    }
+}
diff --git a/opendaylight/md-sal/benchmark-data-store/src/main/resources/odl-datastore-test.yang b/opendaylight/md-sal/benchmark-data-store/src/main/resources/odl-datastore-test.yang
new file mode 100644 (file)
index 0000000..730ca17
--- /dev/null
@@ -0,0 +1,42 @@
+module odl-datastore-test {
+    yang-version 1;
+    namespace "urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:store:test";
+    prefix "store-test";
+    
+    revision "2014-03-13" {
+        description "Initial revision.";
+    }
+
+    container test {
+        list outer-list {
+            key id;
+            leaf id {
+                type int32;
+            }
+            choice outer-choice {
+                case one {
+                    leaf one {
+                        type string;
+                    }
+                }
+                case two-three {
+                    leaf two {
+                        type string;
+                    }
+                    leaf three {
+                        type string;
+                    }
+                }
+            }
+            list inner-list {
+                key name;
+                leaf name {
+                    type int32;
+                }
+                leaf value {
+                    type string;
+                }
+            }
+        }
+    }
+}
\ No newline at end of file
index 1b648dc98c36c0c6e05bfce6740b294dd730e8b8..ecf1a94c18c8123dbcffe5c9d398ce69566f356d 100644 (file)
@@ -61,10 +61,16 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.model.match.types.rev131026
 
 import com.google.common.net.InetAddresses;
 
-public class FromSalConversionsUtils {
+/**
+ * Collection of conversion utilities between AD-SAL and MD-SAL constructs.
+ */
+public final class FromSalConversionsUtils {
 
-    private FromSalConversionsUtils() {
+    /** Width in bits of the ECN field, which occupies the two low-order bits of the IPv4 TOS octet; see http://en.wikipedia.org/wiki/IPv4#Packet_structure (end of octet 1, bits 14-15). */
+    public static final int ENC_FIELD_BIT_SIZE = 2;
 
+    private FromSalConversionsUtils() {
+        throw new IllegalAccessError("forcing no instance for factory");
     }
 
     @SuppressWarnings("unused")
@@ -469,5 +475,12 @@ public class FromSalConversionsUtils {
         return true;
     }
 
+    /**
+     * @param nwDscp NW-DSCP value (6 bits)
+     * @return the value shifted left into the NW-TOS field, with the ECN bits left empty
+     */
+    public static int dscpToTos(int nwDscp) {
+        return (short) (nwDscp << ENC_FIELD_BIT_SIZE);
+    }
 
 }
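Worked example of the shift introduced above: DSCP 46 (the Expedited Forwarding code point, binary 101110) maps to TOS 46 << 2 = 184 (binary 10111000); the six DSCP bits move into the high-order bits of the TOS octet and the two ECN bits stay zero.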
index 5837e35b3a65b7bb4a7b8fc7c0ae2517cbe07db6..00511bc74449adfac7c1d1f3bb0cc968ecb95162 100644 (file)
@@ -315,7 +315,7 @@ public final class MDFlowMapping {
 
     private static SetNwTosActionCase _toAction(final SetNwTos sourceAction) {
         return new SetNwTosActionCaseBuilder()
-        .setSetNwTosAction(new SetNwTosActionBuilder().setTos(sourceAction.getNwTos()).build())
+        .setSetNwTosAction(new SetNwTosActionBuilder().setTos(FromSalConversionsUtils.dscpToTos(sourceAction.getNwTos())).build())
         .build();
     }
 
index 28dd57c3b7986fecabae2c2fa9749e8d58fbd36e..dcc1a4660b5b71690419f377c84852f192b3c0dc 100644 (file)
@@ -128,7 +128,7 @@ public class ToSalConversionsUtils {
     private static final Logger LOG = LoggerFactory.getLogger(ToSalConversionsUtils.class);
 
     private ToSalConversionsUtils() {
-
+        throw new IllegalAccessError("forcing no instance for factory");
     }
 
     public static Flow toFlow(org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.rev131026.Flow source, Node node) {
@@ -287,7 +287,7 @@ public class ToSalConversionsUtils {
             } else if (sourceAction instanceof SetNwTosActionCase) {
                 Integer tos = ((SetNwTosActionCase) sourceAction).getSetNwTosAction().getTos();
                 if (tos != null) {
-                    targetAction.add(new SetNwTos(tos));
+                    targetAction.add(new SetNwTos(ToSalConversionsUtils.tosToNwDscp(tos)));
                 }
             } else if (sourceAction instanceof SetTpDstActionCase) {
                 PortNumber port = ((SetTpDstActionCase) sourceAction).getSetTpDstAction().getPort();
@@ -643,4 +643,12 @@ public class ToSalConversionsUtils {
 
         return mac;
     }
+
+    /**
+     * @param nwTos NW-TOS value
+     * @return the value shifted right to NW-DSCP, dropping the ECN bits
+     */
+    public static int tosToNwDscp(int nwTos) {
+        return (short) (nwTos >>> FromSalConversionsUtils.ENC_FIELD_BIT_SIZE);
+    }
 }
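The two helpers above are inverses for any 6-bit DSCP value, which the new unit tests below also exercise. A minimal, self-contained sketch of the same arithmetic (class and variable names are illustrative only, independent of the classes in this patch):

public final class DscpTosShiftDemo {
    /** The ECN field occupies the two low-order bits of the TOS octet. */
    private static final int ENC_FIELD_BIT_SIZE = 2;

    public static void main(String[] args) {
        int dscp = 46;                               // Expedited Forwarding
        int tos = dscp << ENC_FIELD_BIT_SIZE;        // 46 -> 184, ECN bits zero
        int roundTrip = tos >>> ENC_FIELD_BIT_SIZE;  // 184 -> 46, ECN bits dropped
        System.out.println(tos + " / " + roundTrip); // prints "184 / 46"
    }
}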
diff --git a/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/FromSalConversionsUtilsTest.java b/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/FromSalConversionsUtilsTest.java
new file mode 100644 (file)
index 0000000..b09e816
--- /dev/null
@@ -0,0 +1,31 @@
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.compatibility.test;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.sal.compatibility.FromSalConversionsUtils;
+
+/**
+ * Test of {@link FromSalConversionsUtils}.
+ */
+public class FromSalConversionsUtilsTest {
+
+    /**
+     * Test method for {@link org.opendaylight.controller.sal.compatibility.FromSalConversionsUtils#dscpToTos(int)}.
+     */
+    @Test
+    public void testDscpToTos() {
+        Assert.assertEquals(0, FromSalConversionsUtils.dscpToTos(0));
+        Assert.assertEquals(4, FromSalConversionsUtils.dscpToTos(1));
+        Assert.assertEquals(252, FromSalConversionsUtils.dscpToTos(63));
+        Assert.assertEquals(256, FromSalConversionsUtils.dscpToTos(64));
+        Assert.assertEquals(-4, FromSalConversionsUtils.dscpToTos(-1));
+    }
+
+}
index 9f787b7e391010cee640d6b0582c4e0447ded4c2..98df90112deedfaa0815031802242bfae503784a 100644 (file)
@@ -293,7 +293,7 @@ public class TestFromSalConversionsUtils {
                     }
                     assertTrue("Ipv4 address wasn't found.", ipv4AddressFound);
                 } else if (innerAction instanceof SetNwTosActionCase) {
-                    assertEquals("Wrong TOS in SetNwTosAction.", (Integer) 63, ((SetNwTosActionCase) innerAction).getSetNwTosAction().getTos());
+                    assertEquals("Wrong TOS in SetNwTosAction.", (Integer) 252, ((SetNwTosActionCase) innerAction).getSetNwTosAction().getTos());
                 } else if (innerAction instanceof SetNwDstActionCase) {
                     Address address = ((SetNwDstActionCase) innerAction).getSetNwDstAction().getAddress();
                     boolean ipv4AddressFound = false;
index 60b77394c1f15e8055d93afa259862dccd33c5a3..16d0bb424d02746921d16f5c321915232f5219c8 100644 (file)
@@ -499,7 +499,7 @@ public class TestToSalConversionsUtils {
 
     private void prepareActionSetNwTos(SetNwTosActionCaseBuilder wrapper) {
         SetNwTosActionBuilder setNwTosActionBuilder = new SetNwTosActionBuilder();
-        setNwTosActionBuilder.setTos(63);
+        setNwTosActionBuilder.setTos(252);
         wrapper.setSetNwTosAction(setNwTosActionBuilder.build());
     }
 
diff --git a/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/ToSalConversionsUtilsTest.java b/opendaylight/md-sal/compatibility/sal-compatibility/src/test/java/org/opendaylight/controller/sal/compatibility/test/ToSalConversionsUtilsTest.java
new file mode 100644 (file)
index 0000000..aa25c18
--- /dev/null
@@ -0,0 +1,31 @@
+/**
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.compatibility.test;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.sal.compatibility.ToSalConversionsUtils;
+
+/**
+ * Test of {@link ToSalConversionsUtils}.
+ */
+public class ToSalConversionsUtilsTest {
+
+    /**
+     * Test method for {@link org.opendaylight.controller.sal.compatibility.ToSalConversionsUtils#tosToNwDscp(int)}.
+     */
+    @Test
+    public void testTosToNwDscp() {
+        Assert.assertEquals(0, ToSalConversionsUtils.tosToNwDscp(0));
+        Assert.assertEquals(0, ToSalConversionsUtils.tosToNwDscp(1));
+        Assert.assertEquals(1, ToSalConversionsUtils.tosToNwDscp(4));
+        Assert.assertEquals(63, ToSalConversionsUtils.tosToNwDscp(252));
+        Assert.assertEquals(63, ToSalConversionsUtils.tosToNwDscp(253));
+        Assert.assertEquals(-1, ToSalConversionsUtils.tosToNwDscp(-1));
+    }
+}
index e0c16a080676691080def9e86e12d22fe87883b0..96418596a4295cadf8a13d1b363d5fe7733abb38 100644 (file)
@@ -77,7 +77,7 @@ public class FlowForwarder extends AbstractListeningCommiter<Flow> {
         if (tableIdValidationPrecondition(tableKey, removeDataObj)) {
             final RemoveFlowInputBuilder builder = new RemoveFlowInputBuilder(removeDataObj);
             builder.setFlowRef(new FlowRef(identifier));
-            builder.setNode(new NodeRef(nodeIdent));
+            builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
             builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
             builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
             this.provider.getSalFlowService().removeFlow(builder.build());
@@ -93,7 +93,7 @@ public class FlowForwarder extends AbstractListeningCommiter<Flow> {
         if (tableIdValidationPrecondition(tableKey, update)) {
             final UpdateFlowInputBuilder builder = new UpdateFlowInputBuilder();
 
-            builder.setNode(new NodeRef(nodeIdent));
+            builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
             builder.setFlowRef(new FlowRef(identifier));
             builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
             builder.setUpdatedFlow((new UpdatedFlowBuilder(update)).build());
@@ -112,7 +112,7 @@ public class FlowForwarder extends AbstractListeningCommiter<Flow> {
         if (tableIdValidationPrecondition(tableKey, addDataObj)) {
             final AddFlowInputBuilder builder = new AddFlowInputBuilder(addDataObj);
 
-            builder.setNode(new NodeRef(nodeIdent));
+            builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
             builder.setFlowRef(new FlowRef(identifier));
             builder.setFlowTable(new FlowTableRef(nodeIdent.child(Table.class, tableKey)));
             builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
@@ -129,7 +129,7 @@ public class FlowForwarder extends AbstractListeningCommiter<Flow> {
     private boolean tableIdValidationPrecondition (final TableKey tableKey, final Flow flow) {
         Preconditions.checkNotNull(tableKey, "TableKey can not be null or empty!");
         Preconditions.checkNotNull(flow, "Flow can not be null or empty!");
-        if (flow.getTableId() != tableKey.getId()) {
+        if (! tableKey.getId().equals(flow.getTableId())) {
             LOG.error("TableID in URI tableId={} and in palyload tableId={} is not same.",
                     flow.getTableId(), tableKey.getId());
             return false;
index 72e35ce8dbd84b2b22c766c3a9da3d893b855872..1b2c5323233edb3d30d04306801bee798e581288 100644 (file)
@@ -78,7 +78,7 @@ public class GroupForwarder extends AbstractListeningCommiter<Group> {
         final Group group = (removeDataObj);
         final RemoveGroupInputBuilder builder = new RemoveGroupInputBuilder(group);
 
-        builder.setNode(new NodeRef(nodeIdent));
+        builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
         builder.setGroupRef(new GroupRef(identifier));
         builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
         this.provider.getSalGroupService().removeGroup(builder.build());
@@ -93,7 +93,7 @@ public class GroupForwarder extends AbstractListeningCommiter<Group> {
         final Group updatedGroup = (update);
         final UpdateGroupInputBuilder builder = new UpdateGroupInputBuilder();
 
-        builder.setNode(new NodeRef(nodeIdent));
+        builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
         builder.setGroupRef(new GroupRef(identifier));
         builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
         builder.setUpdatedGroup((new UpdatedGroupBuilder(updatedGroup)).build());
@@ -109,7 +109,7 @@ public class GroupForwarder extends AbstractListeningCommiter<Group> {
         final Group group = (addDataObj);
         final AddGroupInputBuilder builder = new AddGroupInputBuilder(group);
 
-        builder.setNode(new NodeRef(nodeIdent));
+        builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
         builder.setGroupRef(new GroupRef(identifier));
         builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
         this.provider.getSalGroupService().addGroup(builder.build());
index 8a805b029729116b8094d13ef504e05bdd918262..2f3de2a171f2a0c2e0f07385ca503c69761252a9 100644 (file)
@@ -77,7 +77,7 @@ public class MeterForwarder extends AbstractListeningCommiter<Meter> {
 
         final RemoveMeterInputBuilder builder = new RemoveMeterInputBuilder(removeDataObj);
 
-        builder.setNode(new NodeRef(nodeIdent));
+        builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
         builder.setMeterRef(new MeterRef(identifier));
         builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
         this.provider.getSalMeterService().removeMeter(builder.build());
@@ -90,7 +90,7 @@ public class MeterForwarder extends AbstractListeningCommiter<Meter> {
 
         final UpdateMeterInputBuilder builder = new UpdateMeterInputBuilder();
 
-        builder.setNode(new NodeRef(nodeIdent));
+        builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
         builder.setMeterRef(new MeterRef(identifier));
         builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
         builder.setUpdatedMeter((new UpdatedMeterBuilder(update)).build());
@@ -105,7 +105,7 @@ public class MeterForwarder extends AbstractListeningCommiter<Meter> {
 
         final AddMeterInputBuilder builder = new AddMeterInputBuilder(addDataObj);
 
-        builder.setNode(new NodeRef(nodeIdent));
+        builder.setNode(new NodeRef(nodeIdent.firstIdentifierOf(Node.class)));
         builder.setMeterRef(new MeterRef(identifier));
         builder.setTransactionUri(new Uri(provider.getNewTransactionId()));
         this.provider.getSalMeterService().addMeter(builder.build());
index ce830eaa62d3e1f23c288f1969b1e79969252cf7..71a0de9939a2693e2a93a62a1611ff852ab59b2c 100644 (file)
         <module>sal-binding-dom-it</module>
       </modules>
     </profile>
+    <profile>
+      <id>benchmarks</id>
+      <activation>
+        <activeByDefault>false</activeByDefault>
+      </activation>
+      <modules>
+        <module>benchmark-data-store</module>
+      </modules>
+    </profile>
   </profiles>
 </project>
\ No newline at end of file
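Since the new benchmarks profile is not active by default, the benchmark-data-store module is skipped in normal builds. It can be built on demand with the standard Maven profile switch, for example (run from the directory containing the pom edited above; exact working directory assumed):

mvn clean install -Pbenchmarks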
index 98c81c267fae2c8dd5ec70ca9d663fc33b5e340c..e68e7815252f5326ba2af41eea0fc43ba826e87a 100644 (file)
@@ -99,6 +99,7 @@
             <Bundle-Name>${project.groupId}.${project.artifactId}</Bundle-Name>
             <Export-package>org.opendaylight.cluster.raft</Export-package>
             <Import-Package>*</Import-Package>
+            <DynamicImport-Package>*</DynamicImport-Package>
           </instructions>
         </configuration>
       </plugin>
index c4ff108611d9fbdb177f2ef4ace98bb030d69991..3bfdf732cf01cd3d3898158bc4b8e62585e5a9f5 100644 (file)
@@ -67,11 +67,15 @@ public class ExampleActor extends RaftActor {
             }
 
         } else if (message instanceof PrintState) {
-            LOG.debug("State of the node:{} has entries={}, {}",
-                getId(), state.size(), getReplicatedLogState());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("State of the node:{} has entries={}, {}",
+                    getId(), state.size(), getReplicatedLogState());
+            }
 
         } else if (message instanceof PrintRole) {
-            LOG.debug("{} = {}, Peers={}", getId(), getRaftState(),getPeers());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("{} = {}, Peers={}", getId(), getRaftState(), getPeers());
+            }
 
         } else {
             super.onReceiveCommand(message);
@@ -106,7 +110,9 @@ public class ExampleActor extends RaftActor {
         } catch (Exception e) {
            LOG.error("Exception in applying snapshot", e);
         }
-        LOG.debug("Snapshot applied to state :" + ((HashMap) state).size());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Snapshot applied to state :" + ((HashMap) state).size());
+        }
     }
 
     private ByteString fromObject(Object snapshot) throws Exception {
index 978ea91089dbcb1a530c9d66f6878ffa06579e2d..cb51a8951a54f158bac9b7bf1cab769662568fc3 100644 (file)
@@ -7,7 +7,6 @@ import org.opendaylight.controller.cluster.example.messages.PrintRole;
 import org.opendaylight.controller.cluster.example.messages.PrintState;
 import org.opendaylight.controller.cluster.raft.ConfigParams;
 import org.opendaylight.controller.cluster.raft.client.messages.AddRaftPeer;
-import org.opendaylight.controller.cluster.raft.client.messages.RemoveRaftPeer;
 
 import java.io.BufferedReader;
 import java.io.InputStreamReader;
@@ -196,11 +195,6 @@ public class TestDriver {
 
         actorSystem.stop(actorRef);
         actorRefs.remove(actorName);
-
-        for (ActorRef actor : actorRefs.values()) {
-            actor.tell(new RemoveRaftPeer(actorName), null);
-        }
-
         allPeers.remove(actorName);
     }
 
@@ -209,11 +203,6 @@ public class TestDriver {
         allPeers.put(actorName, address);
 
         ActorRef exampleActor = createExampleActor(actorName);
-
-        for (ActorRef actor : actorRefs.values()) {
-            actor.tell(new AddRaftPeer(actorName, address), null);
-        }
-
         actorRefs.put(actorName, exampleActor);
 
         addClientsToNode(actorName, 1);
index 75c237f5035e57abd61c839835ec3c78548a6157..9d06f6360473097beefbbce34962d7433f447f88 100644 (file)
@@ -18,7 +18,7 @@ import java.util.concurrent.TimeUnit;
  */
 public class DefaultConfigParamsImpl implements ConfigParams {
 
-    private static final int SNAPSHOT_BATCH_COUNT = 100000;
+    private static final int SNAPSHOT_BATCH_COUNT = 20000;
 
     /**
      * The maximum election time variance
index 8135d837d3ad86563ad0246022941bfc6134b59d..8270f2949a67cc9fc00f5180dce41872ca6a8a47 100644 (file)
@@ -96,7 +96,7 @@ public abstract class RaftActor extends UntypedPersistentActor {
      * This context should NOT be passed directly to any other actor it is
      * only to be consumed by the RaftActorBehaviors
      */
-    private RaftActorContext context;
+    protected RaftActorContext context;
 
     /**
      * The in-memory journal
@@ -123,7 +123,7 @@ public abstract class RaftActor extends UntypedPersistentActor {
 
     @Override public void onReceiveRecover(Object message) {
         if (message instanceof SnapshotOffer) {
-            LOG.debug("SnapshotOffer called..");
+            LOG.info("SnapshotOffer called..");
             SnapshotOffer offer = (SnapshotOffer) message;
             Snapshot snapshot = (Snapshot) offer.snapshot();
 
@@ -134,25 +134,38 @@ public abstract class RaftActor extends UntypedPersistentActor {
 
             context.setReplicatedLog(replicatedLog);
             context.setLastApplied(snapshot.getLastAppliedIndex());
+            context.setCommitIndex(snapshot.getLastAppliedIndex());
 
-            LOG.debug("Applied snapshot to replicatedLog. " +
-                "snapshotIndex={}, snapshotTerm={}, journal-size={}",
+            LOG.info("Applied snapshot to replicatedLog. " +
+                    "snapshotIndex={}, snapshotTerm={}, journal-size={}",
                 replicatedLog.snapshotIndex, replicatedLog.snapshotTerm,
-                replicatedLog.size());
+                replicatedLog.size()
+            );
 
             // Apply the snapshot to the actors state
             applySnapshot(ByteString.copyFrom(snapshot.getState()));
 
         } else if (message instanceof ReplicatedLogEntry) {
-            replicatedLog.append((ReplicatedLogEntry) message);
+            ReplicatedLogEntry logEntry = (ReplicatedLogEntry) message;
+
+            // Apply State immediately
+            replicatedLog.append(logEntry);
+            applyState(null, "recovery", logEntry.getData());
+            context.setLastApplied(logEntry.getIndex());
+            context.setCommitIndex(logEntry.getIndex());
+
         } else if (message instanceof DeleteEntries) {
             replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+
         } else if (message instanceof UpdateElectionTerm) {
-            context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(), ((UpdateElectionTerm) message).getVotedFor());
+            context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
+                ((UpdateElectionTerm) message).getVotedFor());
+
         } else if (message instanceof RecoveryCompleted) {
-            LOG.debug(
+            LOG.info(
                 "RecoveryCompleted - Switching actor to Follower - " +
-                    "Last index in log:{}, snapshotIndex={}, snapshotTerm={}, " +
+                    "Persistence Id =  " + persistenceId() +
+                    " Last index in log:{}, snapshotIndex={}, snapshotTerm={}, " +
                     "journal-size={}",
                 replicatedLog.lastIndex(), replicatedLog.snapshotIndex,
                 replicatedLog.snapshotTerm, replicatedLog.size());
@@ -165,9 +178,11 @@ public abstract class RaftActor extends UntypedPersistentActor {
         if (message instanceof ApplyState){
             ApplyState applyState = (ApplyState) message;
 
-            LOG.debug("Applying state for log index {} data {}",
-                applyState.getReplicatedLogEntry().getIndex(),
-                applyState.getReplicatedLogEntry().getData());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Applying state for log index {} data {}",
+                    applyState.getReplicatedLogEntry().getIndex(),
+                    applyState.getReplicatedLogEntry().getData());
+            }
 
             applyState(applyState.getClientActor(), applyState.getIdentifier(),
                 applyState.getReplicatedLogEntry().getData());
@@ -175,9 +190,12 @@ public abstract class RaftActor extends UntypedPersistentActor {
         } else if(message instanceof ApplySnapshot ) {
             Snapshot snapshot = ((ApplySnapshot) message).getSnapshot();
 
-            LOG.debug("ApplySnapshot called on Follower Actor " +
-                "snapshotIndex:{}, snapshotTerm:{}", snapshot.getLastAppliedIndex(),
-                snapshot.getLastAppliedTerm());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("ApplySnapshot called on Follower Actor " +
+                        "snapshotIndex:{}, snapshotTerm:{}", snapshot.getLastAppliedIndex(),
+                    snapshot.getLastAppliedTerm()
+                );
+            }
             applySnapshot(ByteString.copyFrom(snapshot.getState()));
 
             //clears the followers log, sets the snapshot index to ensure adjusted-index works
@@ -229,23 +247,25 @@ public abstract class RaftActor extends UntypedPersistentActor {
             context.removePeer(rrp.getName());
 
         } else if (message instanceof CaptureSnapshot) {
-            LOG.debug("CaptureSnapshot received by actor");
+            LOG.info("CaptureSnapshot received by actor");
             CaptureSnapshot cs = (CaptureSnapshot)message;
             captureSnapshot = cs;
             createSnapshot();
 
         } else if (message instanceof CaptureSnapshotReply){
-            LOG.debug("CaptureSnapshotReply received by actor");
+            LOG.info("CaptureSnapshotReply received by actor");
             CaptureSnapshotReply csr = (CaptureSnapshotReply) message;
 
             ByteString stateInBytes = csr.getSnapshot();
-            LOG.debug("CaptureSnapshotReply stateInBytes size:{}", stateInBytes.size());
+            LOG.info("CaptureSnapshotReply stateInBytes size:{}", stateInBytes.size());
             handleCaptureSnapshotReply(stateInBytes);
 
         } else {
             if (!(message instanceof AppendEntriesMessages.AppendEntries)
                 && !(message instanceof AppendEntriesReply) && !(message instanceof SendHeartBeat)) {
-                LOG.debug("onReceiveCommand: message:" + message.getClass());
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("onReceiveCommand: message:" + message.getClass());
+                }
             }
 
             RaftState state =
@@ -255,6 +275,8 @@ public abstract class RaftActor extends UntypedPersistentActor {
             if(oldBehavior != currentBehavior){
                 onStateChanged();
             }
+
+            onLeaderChanged(oldBehavior.getLeaderId(), currentBehavior.getLeaderId());
         }
     }
 
@@ -284,7 +306,9 @@ public abstract class RaftActor extends UntypedPersistentActor {
             context.getReplicatedLog().lastIndex() + 1,
             context.getTermInformation().getCurrentTerm(), data);
 
-        LOG.debug("Persist data {}", replicatedLogEntry);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Persist data {}", replicatedLogEntry);
+        }
 
         replicatedLog
             .appendAndPersist(clientActor, identifier, replicatedLogEntry);
@@ -419,6 +443,8 @@ public abstract class RaftActor extends UntypedPersistentActor {
      */
     protected abstract void onStateChanged();
 
+    protected void onLeaderChanged(String oldLeader, String newLeader){};
+
     private RaftActorBehavior switchBehavior(RaftState state) {
         if (currentBehavior != null) {
             if (currentBehavior.state() == state) {
@@ -471,8 +497,10 @@ public abstract class RaftActor extends UntypedPersistentActor {
             return null;
         }
         String peerAddress = context.getPeerAddress(leaderId);
-        LOG.debug("getLeaderAddress leaderId = " + leaderId + " peerAddress = "
-            + peerAddress);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("getLeaderAddress leaderId = " + leaderId + " peerAddress = "
+                + peerAddress);
+        }
 
         return peerAddress;
     }
@@ -572,10 +600,13 @@ public abstract class RaftActor extends UntypedPersistentActor {
                                 lastAppliedTerm = lastAppliedEntry.getTerm();
                             }
 
-                            LOG.debug("Snapshot Capture logSize: {}", journal.size());
-                            LOG.debug("Snapshot Capture lastApplied:{} ", context.getLastApplied());
-                            LOG.debug("Snapshot Capture lastAppliedIndex:{}", lastAppliedIndex);
-                            LOG.debug("Snapshot Capture lastAppliedTerm:{}", lastAppliedTerm);
+                            if(LOG.isDebugEnabled()) {
+                                LOG.debug("Snapshot Capture logSize: {}", journal.size());
+                                LOG.debug("Snapshot Capture lastApplied:{} ",
+                                    context.getLastApplied());
+                                LOG.debug("Snapshot Capture lastAppliedIndex:{}", lastAppliedIndex);
+                                LOG.debug("Snapshot Capture lastAppliedTerm:{}", lastAppliedTerm);
+                            }
 
                             // send a CaptureSnapshot to self to make the expensive operation async.
                             getSelf().tell(new CaptureSnapshot(
@@ -627,8 +658,9 @@ public abstract class RaftActor extends UntypedPersistentActor {
         }
 
         @Override public void update(long currentTerm, String votedFor) {
-            LOG.debug("Set currentTerm={}, votedFor={}", currentTerm, votedFor);
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Set currentTerm={}, votedFor={}", currentTerm, votedFor);
+            }
             this.currentTerm = currentTerm;
             this.votedFor = votedFor;
         }
index 7e896fed29c4889f6aec5ce39436a1970a50e03b..35d563b784cf3f4705784a78952249ce06badbea 100644 (file)
@@ -272,6 +272,17 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
         return null;
     }
 
+    /**
+     * Find the client request tracker for a specific logIndex and remove it from the tracked list.
+     *
+     * @param logIndex the index of the log entry whose tracker should be removed
+     * @return the removed tracker, or null if none was registered for the given index
+     */
+    protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
+        return null;
+    }
+
+
     /**
      * Find the log index from the previous to last entry in the log
      *
@@ -311,7 +322,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
              i < index + 1; i++) {
             ActorRef clientActor = null;
             String identifier = null;
-            ClientRequestTracker tracker = findClientRequestTracker(i);
+            ClientRequestTracker tracker = removeClientRequestTracker(i);
 
             if (tracker != null) {
                 clientActor = tracker.getClientActor();
@@ -321,19 +332,19 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
                 context.getReplicatedLog().get(i);
 
             if (replicatedLogEntry != null) {
+                // Send a local message to the local RaftActor (its derived class, to be
+                // specific) so it can apply the log entry at this index
                 actor().tell(new ApplyState(clientActor, identifier,
                     replicatedLogEntry), actor());
                 newLastApplied = i;
             } else {
                 //if one index is not present in the log, no point in looping
                 // around as the rest wont be present either
-                context.getLogger().error(
+                context.getLogger().warning(
                     "Missing index {} from log. Cannot apply state. Ignoring {} to {}", i, i, index );
                 break;
             }
         }
-        // Send a local message to the local RaftActor (it's derived class to be
-        // specific to apply the log to it's index)
         context.getLogger().debug("Setting last applied to {}", newLastApplied);
         context.setLastApplied(newLastApplied);
     }
index 610fdc987fde7a1a51491ef25dc2f764011b9eda..1cfdf9dba8b912a5bc23f78b85c447621298d908 100644 (file)
@@ -9,6 +9,7 @@
 package org.opendaylight.controller.cluster.raft.behaviors;
 
 import akka.actor.ActorRef;
+import akka.event.LoggingAdapter;
 import com.google.protobuf.ByteString;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
@@ -38,9 +39,13 @@ import java.util.ArrayList;
 public class Follower extends AbstractRaftActorBehavior {
     private ByteString snapshotChunksCollected = ByteString.EMPTY;
 
+    private final LoggingAdapter LOG;
+
     public Follower(RaftActorContext context) {
         super(context);
 
+        LOG = context.getLogger();
+
         scheduleElection(electionDuration());
     }
 
@@ -48,8 +53,9 @@ public class Follower extends AbstractRaftActorBehavior {
         AppendEntries appendEntries) {
 
         if(appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) {
-            context.getLogger()
-                .debug(appendEntries.toString());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug(appendEntries.toString());
+            }
         }
 
         // TODO : Refactor this method into a bunch of smaller methods
@@ -79,9 +85,10 @@ public class Follower extends AbstractRaftActorBehavior {
             // an entry at prevLogIndex and this follower has no entries in
             // it's log.
 
-            context.getLogger().debug(
-                "The followers log is empty and the senders prevLogIndex is {}",
-                appendEntries.getPrevLogIndex());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("The followers log is empty and the senders prevLogIndex is {}",
+                    appendEntries.getPrevLogIndex());
+            }
 
         } else if (lastIndex() > -1
             && appendEntries.getPrevLogIndex() != -1
@@ -90,9 +97,10 @@ public class Follower extends AbstractRaftActorBehavior {
             // The follower's log is out of sync because the Leader's
             // prevLogIndex entry was not found in it's log
 
-            context.getLogger().debug(
-                "The log is not empty but the prevLogIndex {} was not found in it",
-                appendEntries.getPrevLogIndex());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("The log is not empty but the prevLogIndex {} was not found in it",
+                    appendEntries.getPrevLogIndex());
+            }
 
         } else if (lastIndex() > -1
             && previousEntry != null
@@ -102,10 +110,12 @@ public class Follower extends AbstractRaftActorBehavior {
             // prevLogIndex entry does exist in the follower's log but it has
             // a different term in it
 
-            context.getLogger().debug(
-                "Cannot append entries because previous entry term {}  is not equal to append entries prevLogTerm {}"
-                , previousEntry.getTerm()
-                , appendEntries.getPrevLogTerm());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug(
+                    "Cannot append entries because previous entry term {}  is not equal to append entries prevLogTerm {}"
+                    , previousEntry.getTerm()
+                    , appendEntries.getPrevLogTerm());
+            }
         } else {
             outOfSync = false;
         }
@@ -113,9 +123,12 @@ public class Follower extends AbstractRaftActorBehavior {
         if (outOfSync) {
             // We found that the log was out of sync so just send a negative
             // reply and return
-            context.getLogger().debug("Follower is out-of-sync, " +
-                "so sending negative reply, lastIndex():{}, lastTerm():{}",
-                lastIndex(), lastTerm());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Follower is out-of-sync, " +
+                        "so sending negative reply, lastIndex():{}, lastTerm():{}",
+                    lastIndex(), lastTerm()
+                );
+            }
             sender.tell(
                 new AppendEntriesReply(context.getId(), currentTerm(), false,
                     lastIndex(), lastTerm()), actor()
@@ -125,10 +138,12 @@ public class Follower extends AbstractRaftActorBehavior {
 
         if (appendEntries.getEntries() != null
             && appendEntries.getEntries().size() > 0) {
-            context.getLogger().debug(
-                "Number of entries to be appended = " + appendEntries
-                    .getEntries().size()
-            );
+            if(LOG.isDebugEnabled()) {
+                LOG.debug(
+                    "Number of entries to be appended = " + appendEntries
+                        .getEntries().size()
+                );
+            }
 
             // 3. If an existing entry conflicts with a new one (same index
             // but different terms), delete the existing entry and all that
@@ -151,10 +166,12 @@ public class Follower extends AbstractRaftActorBehavior {
                         continue;
                     }
 
-                    context.getLogger().debug(
-                        "Removing entries from log starting at "
-                            + matchEntry.getIndex()
-                    );
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug(
+                            "Removing entries from log starting at "
+                                + matchEntry.getIndex()
+                        );
+                    }
 
                     // Entries do not match so remove all subsequent entries
                     context.getReplicatedLog()
@@ -163,10 +180,12 @@ public class Follower extends AbstractRaftActorBehavior {
                 }
             }
 
-            context.getLogger().debug(
-                "After cleanup entries to be added from = " + (addEntriesFrom
-                    + lastIndex())
-            );
+            if(LOG.isDebugEnabled()) {
+                context.getLogger().debug(
+                    "After cleanup entries to be added from = " + (addEntriesFrom
+                        + lastIndex())
+                );
+            }
 
             // 4. Append any new entries not already in the log
             for (int i = addEntriesFrom;
@@ -181,8 +200,9 @@ public class Follower extends AbstractRaftActorBehavior {
                     .appendAndPersist(appendEntries.getEntries().get(i));
             }
 
-            context.getLogger().debug(
-                "Log size is now " + context.getReplicatedLog().size());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Log size is now " + context.getReplicatedLog().size());
+            }
         }
 
 
@@ -195,8 +215,9 @@ public class Follower extends AbstractRaftActorBehavior {
             context.getReplicatedLog().lastIndex()));
 
         if (prevCommitIndex != context.getCommitIndex()) {
-            context.getLogger()
-                .debug("Commit index set to " + context.getCommitIndex());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Commit index set to " + context.getCommitIndex());
+            }
         }
 
         // If commitIndex > lastApplied: increment lastApplied, apply
@@ -204,10 +225,14 @@ public class Follower extends AbstractRaftActorBehavior {
         // check if there are any entries to be applied. last-applied can be equal to last-index
         if (appendEntries.getLeaderCommit() > context.getLastApplied() &&
             context.getLastApplied() < lastIndex()) {
-            context.getLogger().debug("applyLogToStateMachine, " +
-                "appendEntries.getLeaderCommit():{}," +
-                "context.getLastApplied():{}, lastIndex():{}",
-                appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("applyLogToStateMachine, " +
+                        "appendEntries.getLeaderCommit():{}," +
+                        "context.getLastApplied():{}, lastIndex():{}",
+                    appendEntries.getLeaderCommit(), context.getLastApplied(), lastIndex()
+                );
+            }
+
             applyLogToStateMachine(appendEntries.getLeaderCommit());
         }
 
@@ -259,9 +284,13 @@ public class Follower extends AbstractRaftActorBehavior {
     }
 
     private void handleInstallSnapshot(ActorRef sender, InstallSnapshot installSnapshot) {
-        context.getLogger().debug("InstallSnapshot received by follower " +
-            "datasize:{} , Chunk:{}/{}", installSnapshot.getData().size(),
-            installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks());
+
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("InstallSnapshot received by follower " +
+                    "datasize:{} , Chunk:{}/{}", installSnapshot.getData().size(),
+                installSnapshot.getChunkIndex(), installSnapshot.getTotalChunks()
+            );
+        }
 
         try {
             if (installSnapshot.getChunkIndex() == installSnapshot.getTotalChunks()) {
@@ -283,8 +312,11 @@ public class Follower extends AbstractRaftActorBehavior {
             } else {
                 // we have more to go
                 snapshotChunksCollected = snapshotChunksCollected.concat(installSnapshot.getData());
-                context.getLogger().debug("Chunk={},snapshotChunksCollected.size:{}",
-                    installSnapshot.getChunkIndex(), snapshotChunksCollected.size());
+
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Chunk={},snapshotChunksCollected.size:{}",
+                        installSnapshot.getChunkIndex(), snapshotChunksCollected.size());
+                }
             }
 
             sender.tell(new InstallSnapshotReply(
index 90948ffef7d8a5e1341bb8aede6b03ccf8dae344..199d2d61cf5bbbba34ad8cfa62709228331d2b0f 100644 (file)
@@ -11,6 +11,7 @@ package org.opendaylight.controller.cluster.raft.behaviors;
 import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.Cancellable;
+import akka.event.LoggingAdapter;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
 import org.opendaylight.controller.cluster.raft.ClientRequestTracker;
@@ -80,9 +81,13 @@ public class Leader extends AbstractRaftActorBehavior {
 
     private final int minReplicationCount;
 
+    private final LoggingAdapter LOG;
+
     public Leader(RaftActorContext context) {
         super(context);
 
+        LOG = context.getLogger();
+
         if (lastIndex() >= 0) {
             context.setCommitIndex(lastIndex());
         }
@@ -98,7 +103,9 @@ public class Leader extends AbstractRaftActorBehavior {
             followerToLog.put(followerId, followerLogInformation);
         }
 
-        context.getLogger().debug("Election:Leader has following peers:"+ followers);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Election:Leader has following peers:" + followers);
+        }
 
         if (followers.size() > 0) {
             minReplicationCount = (followers.size() + 1) / 2 + 1;
@@ -123,7 +130,9 @@ public class Leader extends AbstractRaftActorBehavior {
     @Override protected RaftState handleAppendEntries(ActorRef sender,
         AppendEntries appendEntries) {
 
-        context.getLogger().debug(appendEntries.toString());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug(appendEntries.toString());
+        }
 
         return state();
     }
@@ -132,8 +141,9 @@ public class Leader extends AbstractRaftActorBehavior {
         AppendEntriesReply appendEntriesReply) {
 
         if(! appendEntriesReply.isSuccess()) {
-            context.getLogger()
-                .debug(appendEntriesReply.toString());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug(appendEntriesReply.toString());
+            }
         }
 
         // Update the FollowerLogInformation
@@ -142,7 +152,7 @@ public class Leader extends AbstractRaftActorBehavior {
             followerToLog.get(followerId);
 
         if(followerLogInformation == null){
-            context.getLogger().error("Unknown follower {}", followerId);
+            LOG.error("Unknown follower {}", followerId);
             return state();
         }
 
@@ -196,6 +206,16 @@ public class Leader extends AbstractRaftActorBehavior {
         return state();
     }
 
+    protected ClientRequestTracker removeClientRequestTracker(long logIndex) {
+
+        ClientRequestTracker toRemove = findClientRequestTracker(logIndex);
+        if(toRemove != null) {
+            trackerList.remove(toRemove);
+        }
+
+        return toRemove;
+    }
+
     protected ClientRequestTracker findClientRequestTracker(long logIndex) {
         for (ClientRequestTracker tracker : trackerList) {
             if (tracker.getIndex() == logIndex) {
@@ -260,10 +280,13 @@ public class Leader extends AbstractRaftActorBehavior {
             if (reply.isSuccess()) {
                 if(followerToSnapshot.isLastChunk(reply.getChunkIndex())) {
                     //this was the last chunk reply
-                    context.getLogger().debug("InstallSnapshotReply received, " +
-                        "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
-                        reply.getChunkIndex(), followerId,
-                        context.getReplicatedLog().getSnapshotIndex() + 1);
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("InstallSnapshotReply received, " +
+                                "last chunk received, Chunk:{}. Follower:{} Setting nextIndex:{}",
+                            reply.getChunkIndex(), followerId,
+                            context.getReplicatedLog().getSnapshotIndex() + 1
+                        );
+                    }
 
                     FollowerLogInformation followerLogInformation =
                         followerToLog.get(followerId);
@@ -272,31 +295,38 @@ public class Leader extends AbstractRaftActorBehavior {
                     followerLogInformation.setNextIndex(
                         context.getReplicatedLog().getSnapshotIndex() + 1);
                     mapFollowerToSnapshot.remove(followerId);
-                    context.getLogger().debug("followerToLog.get(followerId).getNextIndex().get()=" +
-                        followerToLog.get(followerId).getNextIndex().get());
+
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("followerToLog.get(followerId).getNextIndex().get()=" +
+                            followerToLog.get(followerId).getNextIndex().get());
+                    }
 
                 } else {
                     followerToSnapshot.markSendStatus(true);
                 }
             } else {
-                context.getLogger().info("InstallSnapshotReply received, " +
-                    "sending snapshot chunk failed, Will retry, Chunk:{}",
-                    reply.getChunkIndex());
+                LOG.info("InstallSnapshotReply received, " +
+                        "sending snapshot chunk failed, Will retry, Chunk:{}",
+                    reply.getChunkIndex()
+                );
                 followerToSnapshot.markSendStatus(false);
             }
 
         } else {
-            context.getLogger().error("ERROR!!" +
-                "FollowerId in InstallSnapshotReply not known to Leader" +
-                " or Chunk Index in InstallSnapshotReply not matching {} != {}",
-                followerToSnapshot.getChunkIndex(), reply.getChunkIndex() );
+            LOG.error("ERROR!!" +
+                    "FollowerId in InstallSnapshotReply not known to Leader" +
+                    " or Chunk Index in InstallSnapshotReply not matching {} != {}",
+                followerToSnapshot.getChunkIndex(), reply.getChunkIndex()
+            );
         }
     }
 
     private void replicate(Replicate replicate) {
         long logIndex = replicate.getReplicatedLogEntry().getIndex();
 
-        context.getLogger().debug("Replicate message " + logIndex);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Replicate message " + logIndex);
+        }
 
         // Create a tracker entry we will use this later to notify the
         // client actor
@@ -350,10 +380,13 @@ public class Leader extends AbstractRaftActorBehavior {
                         if (followerNextIndex >= 0 && leaderLastIndex >= followerNextIndex ) {
                             // if the follower is just not starting and leader's index
                             // is more than followers index
-                            context.getLogger().debug("SendInstallSnapshot to follower:{}," +
-                                "follower-nextIndex:{}, leader-snapshot-index:{},  " +
-                                "leader-last-index:{}", followerId,
-                                followerNextIndex, leaderSnapShotIndex, leaderLastIndex);
+                            if(LOG.isDebugEnabled()) {
+                                LOG.debug("SendInstallSnapshot to follower:{}," +
+                                        "follower-nextIndex:{}, leader-snapshot-index:{},  " +
+                                        "leader-last-index:{}", followerId,
+                                    followerNextIndex, leaderSnapShotIndex, leaderLastIndex
+                                );
+                            }
 
                             actor().tell(new SendInstallSnapshot(), actor());
                         } else {
@@ -412,11 +445,11 @@ public class Leader extends AbstractRaftActorBehavior {
                 ).toSerializable(),
                 actor()
             );
-            context.getLogger().info("InstallSnapshot sent to follower {}, Chunk: {}/{}",
+            LOG.info("InstallSnapshot sent to follower {}, Chunk: {}/{}",
                 followerActor.path(), mapFollowerToSnapshot.get(followerId).getChunkIndex(),
                 mapFollowerToSnapshot.get(followerId).getTotalChunks());
         } catch (IOException e) {
-            context.getLogger().error("InstallSnapshot failed for Leader.", e);
+            LOG.error("InstallSnapshot failed for Leader.", e);
         }
     }
 
@@ -431,7 +464,9 @@ public class Leader extends AbstractRaftActorBehavior {
             mapFollowerToSnapshot.put(followerId, followerToSnapshot);
         }
         ByteString nextChunk = followerToSnapshot.getNextChunk();
-        context.getLogger().debug("Leader's snapshot nextChunk size:{}", nextChunk.size());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Leader's snapshot nextChunk size:{}", nextChunk.size());
+        }
 
         return nextChunk;
     }
@@ -526,8 +561,10 @@ public class Leader extends AbstractRaftActorBehavior {
             int size = snapshotBytes.size();
             totalChunks = ( size / context.getConfigParams().getSnapshotChunkSize()) +
                 ((size % context.getConfigParams().getSnapshotChunkSize()) > 0 ? 1 : 0);
-            context.getLogger().debug("Snapshot {} bytes, total chunks to send:{}",
-                size, totalChunks);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Snapshot {} bytes, total chunks to send:{}",
+                    size, totalChunks);
+            }
         }
 
         public ByteString getSnapshotBytes() {
@@ -591,8 +628,10 @@ public class Leader extends AbstractRaftActorBehavior {
                 }
             }
 
-            context.getLogger().debug("length={}, offset={},size={}",
-                snapshotLength, start, size);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("length={}, offset={},size={}",
+                    snapshotLength, start, size);
+            }
             return getSnapshotBytes().substring(start, start + size);
 
         }
index 6665d7549b0d82abe752bcd50aed1826455b331e..5149cf9f34f5f66873feb4695b8939c36aa81d6b 100644 (file)
@@ -132,7 +132,7 @@ public class AppendEntries extends AbstractRaftRPC {
             try {
                 if(leProtoBuff.getData() != null && leProtoBuff.getData().getClientPayloadClassName() != null) {
                     String clientPayloadClassName = leProtoBuff.getData().getClientPayloadClassName();
-                    payload = (Payload)Class.forName(clientPayloadClassName).newInstance();
+                    payload = (Payload) Class.forName(clientPayloadClassName).newInstance();
                     payload = payload.decode(leProtoBuff.getData());
                     payload.setClientPayloadClassName(clientPayloadClassName);
                 } else {
index 9d40fa3d9edb3858969797e929776ddcba424333..c084cba82210823ada7f79a0edb35472ec6ed326 100644 (file)
@@ -9,7 +9,7 @@
 package org.opendaylight.controller.cluster.raft.messages;
 
 import com.google.protobuf.ByteString;
-import org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
 
 public class InstallSnapshot extends AbstractRaftRPC {
 
index 12123db12995061901a39a264c79f0237d78d00a..9b099c2abac8223529b750c6ea906925105ec487 100644 (file)
@@ -2,18 +2,24 @@ package org.opendaylight.controller.cluster.raft;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
+import akka.actor.PoisonPill;
 import akka.actor.Props;
 import akka.event.Logging;
 import akka.japi.Creator;
 import akka.testkit.JavaTestKit;
+import akka.testkit.TestActorRef;
 import com.google.protobuf.ByteString;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeaderReply;
+import org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore;
 
+import java.util.ArrayList;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 
+import static junit.framework.Assert.assertTrue;
 import static junit.framework.TestCase.assertEquals;
 
 public class RaftActorTest extends AbstractActorTest {
@@ -21,11 +27,21 @@ public class RaftActorTest extends AbstractActorTest {
 
     public static class MockRaftActor extends RaftActor {
 
+        boolean applySnapshotCalled = false;
+
         public MockRaftActor(String id,
             Map<String, String> peerAddresses) {
             super(id, peerAddresses);
         }
 
+        public RaftActorContext getRaftActorContext() {
+            return context;
+        }
+
+        public boolean isApplySnapshotCalled() {
+            return applySnapshotCalled;
+        }
+
         public static Props props(final String id, final Map<String, String> peerAddresses){
             return Props.create(new Creator<MockRaftActor>(){
 
@@ -45,7 +61,7 @@ public class RaftActorTest extends AbstractActorTest {
         }
 
         @Override protected void applySnapshot(ByteString snapshot) {
-            throw new UnsupportedOperationException("applySnapshot");
+           applySnapshotCalled = true;
         }
 
         @Override protected void onStateChanged() {
@@ -134,5 +150,56 @@ public class RaftActorTest extends AbstractActorTest {
         kit.findLeader(kit.getRaftActor().path().toString());
     }
 
+    @Test
+    public void testActorRecovery() {
+        new JavaTestKit(getSystem()) {{
+            new Within(duration("1 seconds")) {
+                protected void run() {
+
+                    String persistenceId = "follower10";
+
+                    ActorRef followerActor = getSystem().actorOf(
+                        MockRaftActor.props(persistenceId, Collections.EMPTY_MAP), persistenceId);
+
+
+                    List<ReplicatedLogEntry> entries = new ArrayList<>();
+                    ReplicatedLogEntry entry1 = new MockRaftActorContext.MockReplicatedLogEntry(1, 4, new MockRaftActorContext.MockPayload("E"));
+                    ReplicatedLogEntry entry2 = new MockRaftActorContext.MockReplicatedLogEntry(1, 5, new MockRaftActorContext.MockPayload("F"));
+                    entries.add(entry1);
+                    entries.add(entry2);
+
+                    int lastApplied = 3;
+                    int lastIndex = 5;
+                    Snapshot snapshot = Snapshot.create("A B C D".getBytes(), entries, lastIndex, 1 , lastApplied, 1);
+                    MockSnapshotStore.setMockSnapshot(snapshot);
+                    MockSnapshotStore.setPersistenceId(persistenceId);
+
+                    followerActor.tell(PoisonPill.getInstance(), null);
+                    try {
+                        // give some time for actor to die
+                        Thread.sleep(200);
+                    } catch (InterruptedException e) {
+                        e.printStackTrace();
+                    }
+
+                    TestActorRef<MockRaftActor> ref = TestActorRef.create(getSystem(), MockRaftActor.props(persistenceId, Collections.EMPTY_MAP));
+                    try {
+                        //give some time for snapshot offer to get called.
+                        Thread.sleep(200);
+                    } catch (InterruptedException e) {
+                        e.printStackTrace();
+                    }
+                    RaftActorContext context = ref.underlyingActor().getRaftActorContext();
+                    assertEquals(entries.size(), context.getReplicatedLog().size());
+                    assertEquals(lastApplied, context.getLastApplied());
+                    assertEquals(lastApplied, context.getCommitIndex());
+                    assertTrue(ref.underlyingActor().isApplySnapshotCalled());
+                }
+
+            };
+        }};
+
+    }
+
 
 }
index 227d1effa7e9b8b3bb65932fe8c25b1a2eecdbf5..fd4a75a22f735c06d2ab8042eb13d6e92de572a0 100644 (file)
@@ -3,6 +3,8 @@ package org.opendaylight.controller.cluster.raft.behaviors;
 import akka.actor.ActorRef;
 import akka.actor.Props;
 import akka.testkit.JavaTestKit;
+import akka.util.Timeout;
+import com.google.protobuf.ByteString;
 import junit.framework.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
@@ -10,19 +12,35 @@ import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
+import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
 import org.opendaylight.controller.cluster.raft.base.messages.ElectionTimeout;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
+import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
 import org.opendaylight.controller.cluster.raft.messages.RequestVote;
 import org.opendaylight.controller.cluster.raft.messages.RequestVoteReply;
 import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
-
+import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectOutputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
 
+import static akka.pattern.Patterns.ask;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 public class FollowerTest extends AbstractRaftActorBehaviorTest {
 
@@ -34,8 +52,12 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest {
         return new Follower(actorContext);
     }
 
-    @Override protected RaftActorContext createActorContext() {
-        return new MockRaftActorContext("test", getSystem(), followerActor);
+    @Override protected  RaftActorContext createActorContext() {
+        return createActorContext(followerActor);
+    }
+
+    protected  RaftActorContext createActorContext(ActorRef actorRef){
+        return new MockRaftActorContext("test", getSystem(), actorRef);
     }
 
     @Test
@@ -158,13 +180,14 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest {
                 createActorContext();
 
             context.setLastApplied(100);
-            setLastLogEntry((MockRaftActorContext) context, 1, 100, new MockRaftActorContext.MockPayload(""));
+            setLastLogEntry((MockRaftActorContext) context, 1, 100,
+                new MockRaftActorContext.MockPayload(""));
             ((MockRaftActorContext) context).getReplicatedLog().setSnapshotIndex(99);
 
             List<ReplicatedLogEntry> entries =
                 Arrays.asList(
-                    (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(2, 101,
-                        new MockRaftActorContext.MockPayload("foo"))
+                        (ReplicatedLogEntry) new MockRaftActorContext.MockReplicatedLogEntry(2, 101,
+                                new MockRaftActorContext.MockPayload("foo"))
                 );
 
             // The new commitIndex is 101
@@ -409,4 +432,148 @@ public class FollowerTest extends AbstractRaftActorBehaviorTest {
         }};
     }
 
+
+    /**
+     * This test verifies that when InstallSnapshot is received by
+     * the follower it is applied correctly.
+     *
+     * @throws Exception
+     */
+    @Test
+    public void testHandleInstallSnapshot() throws Exception {
+        JavaTestKit javaTestKit = new JavaTestKit(getSystem()) {{
+
+            ActorRef leaderActor = getSystem().actorOf(Props.create(
+                MessageCollectorActor.class));
+
+            MockRaftActorContext context = (MockRaftActorContext)
+                createActorContext(getRef());
+
+            Follower follower = (Follower)createBehavior(context);
+
+            HashMap<String, String> followerSnapshot = new HashMap<>();
+            followerSnapshot.put("1", "A");
+            followerSnapshot.put("2", "B");
+            followerSnapshot.put("3", "C");
+
+            ByteString bsSnapshot  = toByteString(followerSnapshot);
+            ByteString chunkData = ByteString.EMPTY;
+            int offset = 0;
+            int snapshotLength = bsSnapshot.size();
+            int i = 1;
+
+            do {
+                chunkData = getNextChunk(bsSnapshot, offset);
+                final InstallSnapshot installSnapshot =
+                    new InstallSnapshot(1, "leader-1", i, 1,
+                        chunkData, i, 3);
+                follower.handleMessage(leaderActor, installSnapshot);
+                offset = offset + 50;
+                i++;
+            } while ((offset+50) < snapshotLength);
+
+            final InstallSnapshot installSnapshot3 = new InstallSnapshot(1, "leader-1", 3, 1, chunkData, 3, 3);
+            follower.handleMessage(leaderActor, installSnapshot3);
+
+            String[] matches = new ReceiveWhile<String>(String.class, duration("2 seconds")) {
+                @Override
+                protected String match(Object o) throws Exception {
+                    if (o instanceof ApplySnapshot) {
+                        ApplySnapshot as = (ApplySnapshot)o;
+                        if (as.getSnapshot().getLastIndex() != installSnapshot3.getLastIncludedIndex()) {
+                            return "applySnapshot-lastIndex-mismatch";
+                        }
+                        if (as.getSnapshot().getLastAppliedTerm() != installSnapshot3.getLastIncludedTerm()) {
+                            return "applySnapshot-lastAppliedTerm-mismatch";
+                        }
+                        if (as.getSnapshot().getLastAppliedIndex() != installSnapshot3.getLastIncludedIndex()) {
+                            return "applySnapshot-lastAppliedIndex-mismatch";
+                        }
+                        if (as.getSnapshot().getLastTerm() != installSnapshot3.getLastIncludedTerm()) {
+                            return "applySnapshot-lastTerm-mismatch";
+                        }
+                        return "applySnapshot";
+                    }
+
+                    return "ignoreCase";
+                }
+            }.get();
+
+            String applySnapshotMatch = "";
+            for (String reply: matches) {
+                if (reply.startsWith("applySnapshot")) {
+                    applySnapshotMatch = reply;
+                }
+            }
+
+            assertEquals("applySnapshot", applySnapshotMatch);
+
+            Object messages = executeLocalOperation(leaderActor, "get-all-messages");
+
+            assertNotNull(messages);
+            assertTrue(messages instanceof List);
+            List<Object> listMessages = (List<Object>) messages;
+
+            int installSnapshotReplyReceivedCount = 0;
+            for (Object message: listMessages) {
+                if (message instanceof InstallSnapshotReply) {
+                    ++installSnapshotReplyReceivedCount;
+                }
+            }
+
+            assertEquals(3, installSnapshotReplyReceivedCount);
+
+        }};
+    }
+
+    public Object executeLocalOperation(ActorRef actor, Object message) throws Exception {
+        FiniteDuration operationDuration = Duration.create(5, TimeUnit.SECONDS);
+        Timeout operationTimeout = new Timeout(operationDuration);
+        Future<Object> future = ask(actor, message, operationTimeout);
+
+        try {
+            return Await.result(future, operationDuration);
+        } catch (Exception e) {
+            throw e;
+        }
+    }
+
+    public ByteString getNextChunk (ByteString bs, int offset){
+        int snapshotLength = bs.size();
+        int start = offset;
+        int size = 50;
+        if (50 > snapshotLength) {
+            size = snapshotLength;
+        } else {
+            if ((start + 50) > snapshotLength) {
+                size = snapshotLength - start;
+            }
+        }
+        return bs.substring(start, start + size);
+    }
+
+    private ByteString toByteString(Map<String, String> state) {
+        ByteArrayOutputStream b = null;
+        ObjectOutputStream o = null;
+        try {
+            try {
+                b = new ByteArrayOutputStream();
+                o = new ObjectOutputStream(b);
+                o.writeObject(state);
+                byte[] snapshotBytes = b.toByteArray();
+                return ByteString.copyFrom(snapshotBytes);
+            } finally {
+                if (o != null) {
+                    o.flush();
+                    o.close();
+                }
+                if (b != null) {
+                    b.close();
+                }
+            }
+        } catch (IOException e) {
+            org.junit.Assert.fail("IOException in converting Hashmap to Bytestring:" + e);
+        }
+        return null;
+    }
 }
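
getNextChunk above walks the serialized snapshot in fixed 50-byte slices, and the do/while loop plus the final InstallSnapshot send together cover every slice. The sketch below is only an illustration of that arithmetic (ChunkerSketch and its CHUNK_SIZE constant are hypothetical): it enumerates the chunks, shows that only the last one may be shorter, and derives the total chunk count a leader would advertise.

    import com.google.protobuf.ByteString;

    import java.util.ArrayList;
    import java.util.List;

    public class ChunkerSketch {
        private static final int CHUNK_SIZE = 50; // mirrors the hard-coded 50 in the test

        // Splits a snapshot into CHUNK_SIZE slices; only the last slice may be shorter.
        static List<ByteString> chunks(ByteString snapshot) {
            List<ByteString> result = new ArrayList<>();
            for (int offset = 0; offset < snapshot.size(); offset += CHUNK_SIZE) {
                int end = Math.min(offset + CHUNK_SIZE, snapshot.size());
                result.add(snapshot.substring(offset, end));
            }
            return result;
        }

        public static void main(String[] args) {
            StringBuilder payload = new StringBuilder();
            for (int i = 0; i < 120; i++) {
                payload.append('x');
            }
            ByteString snapshot = ByteString.copyFromUtf8(payload.toString()); // 120 bytes
            int totalChunks = (snapshot.size() + CHUNK_SIZE - 1) / CHUNK_SIZE; // ceiling division -> 3
            System.out.println(chunks(snapshot).size() + " chunks, expected " + totalChunks);
        }
    }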
index 73c9f96b82a0a582f4cf5e61b5d68c488f9bc198..c4ef51d968422533f9df668bb23fd56563dc2ad2 100644 (file)
@@ -22,8 +22,8 @@ import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapsho
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshot;
 import org.opendaylight.controller.cluster.raft.messages.InstallSnapshotReply;
-import org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages;
 import org.opendaylight.controller.cluster.raft.utils.DoNothingActor;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MessageCollectorActor.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MessageCollectorActor.java
new file mode 100644 (file)
index 0000000..88eecfe
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.actor.UntypedActor;
+
+import java.util.ArrayList;
+import java.util.List;
+
+
+public class MessageCollectorActor extends UntypedActor {
+    private List<Object> messages = new ArrayList<>();
+
+    @Override public void onReceive(Object message) throws Exception {
+        if(message instanceof String){
+            if("get-all-messages".equals(message)){
+                getSender().tell(messages, getSelf());
+            }
+        } else {
+            messages.add(message);
+        }
+    }
+}
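
MessageCollectorActor buffers every non-String message it receives and hands the buffered list back when asked for "get-all-messages"; that is how the follower test above counts InstallSnapshotReply instances. A minimal usage sketch with an illustrative ActorSystem name and throwaway messages:

    import akka.actor.ActorRef;
    import akka.actor.ActorSystem;
    import akka.actor.Props;
    import akka.util.Timeout;
    import org.opendaylight.controller.cluster.raft.utils.MessageCollectorActor;
    import scala.concurrent.Await;
    import scala.concurrent.duration.Duration;
    import scala.concurrent.duration.FiniteDuration;

    import java.util.List;
    import java.util.concurrent.TimeUnit;

    import static akka.pattern.Patterns.ask;

    public class MessageCollectorSketch {
        public static void main(String[] args) throws Exception {
            ActorSystem system = ActorSystem.create("collector-sketch");
            ActorRef collector = system.actorOf(Props.create(MessageCollectorActor.class));

            // Non-String messages are buffered; plain Strings other than the
            // "get-all-messages" query are silently dropped by the actor as written.
            collector.tell(42, ActorRef.noSender());
            collector.tell(43, ActorRef.noSender());

            FiniteDuration wait = Duration.create(5, TimeUnit.SECONDS);
            Object reply = Await.result(ask(collector, "get-all-messages", new Timeout(wait)), wait);
            System.out.println(((List<?>) reply).size()); // 2

            system.shutdown();
        }
    }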
diff --git a/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MockSnapshotStore.java b/opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/utils/MockSnapshotStore.java
new file mode 100644 (file)
index 0000000..d70bf92
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.raft.utils;
+
+import akka.dispatch.Futures;
+import akka.japi.Option;
+import akka.persistence.SelectedSnapshot;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.snapshot.japi.SnapshotStore;
+import org.opendaylight.controller.cluster.raft.Snapshot;
+import scala.concurrent.Future;
+
+
+public class MockSnapshotStore  extends SnapshotStore {
+
+    private static Snapshot mockSnapshot;
+    private static String persistenceId;
+
+    public static void setMockSnapshot(Snapshot s) {
+        mockSnapshot = s;
+    }
+
+    public static void setPersistenceId(String pId) {
+        persistenceId = pId;
+    }
+
+    @Override
+    public Future<Option<SelectedSnapshot>> doLoadAsync(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) {
+        if (mockSnapshot == null) {
+            return Futures.successful(Option.<SelectedSnapshot>none());
+        }
+
+        SnapshotMetadata smd = new SnapshotMetadata(persistenceId, 1, 12345);
+        SelectedSnapshot selectedSnapshot =
+            new SelectedSnapshot(smd, mockSnapshot);
+        return Futures.successful(Option.some(selectedSnapshot));
+    }
+
+    @Override
+    public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
+        return null;
+    }
+
+    @Override
+    public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
+
+    }
+
+    @Override
+    public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
+
+    }
+
+    @Override
+    public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria) throws Exception {
+
+    }
+}
index 2b753004c48265620628fdbc58a1e41a96a65c51..6b2cc2203844198bc546ab509695d3c134a35a8b 100644 (file)
@@ -1,4 +1,6 @@
 akka {
+    persistence.snapshot-store.plugin = "mock-snapshot-store"
+
     loglevel = "DEBUG"
     loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
 
@@ -19,3 +21,10 @@ akka {
         }
     }
 }
+
+mock-snapshot-store {
+  # Class name of the plugin.
+  class = "org.opendaylight.controller.cluster.raft.utils.MockSnapshotStore"
+  # Dispatcher for the plugin actor.
+  plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
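
The two additions above wire Akka persistence in the tests to the mock store: akka.persistence.snapshot-store.plugin points at the mock-snapshot-store block, and that block names MockSnapshotStore plus its dispatcher. A small Typesafe Config sketch, assuming this application.conf is on the test classpath, walks the same chain:

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    public class SnapshotStorePluginCheck {
        public static void main(String[] args) {
            Config config = ConfigFactory.load();   // reads application.conf from the classpath

            // Which snapshot-store plugin is active for this ActorSystem?
            String pluginPath = config.getString("akka.persistence.snapshot-store.plugin");
            System.out.println(pluginPath);          // mock-snapshot-store

            // The plugin block names the implementation class and its dispatcher.
            Config plugin = config.getConfig(pluginPath);
            System.out.println(plugin.getString("class"));             // ...MockSnapshotStore
            System.out.println(plugin.getString("plugin-dispatcher")); // akka.persistence.dispatchers.default-plugin-dispatcher
        }
    }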
index a3619ec4d230463edcbf7e15957b230db9d0cb09..d12f867ac5663b7637ecff59110dfbb0303798ec 100644 (file)
       </dependency>
 
 
+      <dependency>
+          <groupId>com.typesafe.akka</groupId>
+          <artifactId>akka-osgi_${scala.version}</artifactId>
+      </dependency>
+      <dependency>
+          <groupId>com.typesafe.akka</groupId>
+          <artifactId>akka-actor_${scala.version}</artifactId>
+      </dependency>
       <dependency>
           <groupId>com.google.guava</groupId>
           <artifactId>guava</artifactId>
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractConfig.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractConfig.java
new file mode 100644 (file)
index 0000000..3a66aa1
--- /dev/null
@@ -0,0 +1,47 @@
+package org.opendaylight.controller.cluster.common.actor;
+
+import com.google.common.base.Preconditions;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public abstract class AbstractConfig implements UnifiedConfig {
+
+    private Config config;
+
+    public AbstractConfig(Config config){
+        this.config = config;
+    }
+
+    @Override
+    public Config get() {
+        return config;
+    }
+
+    public static abstract class Builder<T extends Builder>{
+        protected Map<String, Object> configHolder;
+        protected Config fallback;
+
+        private final String actorSystemName;
+
+        public Builder(String actorSystemName){
+            Preconditions.checkArgument(actorSystemName != null, "Actor system name must not be null");
+            this.actorSystemName = actorSystemName;
+            configHolder = new HashMap<>();
+        }
+
+        public T withConfigReader(AkkaConfigurationReader reader){
+            fallback = reader.read().getConfig(actorSystemName);
+            return (T)this;
+        }
+
+        protected Config merge(){
+            if (fallback == null)
+                fallback = ConfigFactory.load().getConfig(actorSystemName);
+
+            return ConfigFactory.parseMap(configHolder).withFallback(fallback);
+        }
+    }
+}
@@ -6,31 +6,36 @@
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
 
-package org.opendaylight.controller.cluster.datastore;
+package org.opendaylight.controller.cluster.common.actor;
 
 import akka.actor.UntypedActor;
 import akka.event.Logging;
 import akka.event.LoggingAdapter;
-import org.opendaylight.controller.cluster.datastore.messages.Monitor;
 
 public abstract class AbstractUntypedActor extends UntypedActor {
     protected final LoggingAdapter LOG =
         Logging.getLogger(getContext().system(), this);
 
-
     public AbstractUntypedActor() {
-        LOG.debug("Actor created {}", getSelf());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Actor created {}", getSelf());
+        }
         getContext().
             system().
             actorSelection("user/termination-monitor").
             tell(new Monitor(getSelf()), getSelf());
+
     }
 
     @Override public void onReceive(Object message) throws Exception {
-        LOG.debug("Received message {}", message.getClass().getSimpleName());
+        final String messageType = message.getClass().getSimpleName();
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Received message {}", messageType);
+        }
         handleReceive(message);
-        LOG.debug("Done handling message {}",
-            message.getClass().getSimpleName());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Done handling message {}", messageType);
+        }
     }
 
     protected abstract void handleReceive(Object message) throws Exception;
@@ -40,7 +45,9 @@ public abstract class AbstractUntypedActor extends UntypedActor {
     }
 
     protected void unknownMessage(Object message) throws Exception {
-        LOG.debug("Received unhandled message {}", message);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Received unhandled message {}", message);
+        }
         unhandled(message);
     }
 }
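
With the changes above, AbstractUntypedActor wraps its guarded debug logging around handleReceive, so a subclass only implements typed handling and routes anything unexpected to unknownMessage. A minimal subclass sketch (PingActor and its Ping message are hypothetical):

    import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;

    public class PingActor extends AbstractUntypedActor {

        // Hypothetical message type used only for this sketch.
        public static final class Ping {}

        @Override
        protected void handleReceive(Object message) throws Exception {
            if (message instanceof Ping) {
                getSender().tell("pong", getSelf());
            } else {
                unknownMessage(message); // logs (when debug is enabled) and calls unhandled()
            }
        }
    }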
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActorWithMetering.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedActorWithMetering.java
new file mode 100644 (file)
index 0000000..5497f93
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+/**
+ * Actor with its behaviour metered. Metering is enabled by configuration.
+ */
+public abstract class AbstractUntypedActorWithMetering extends AbstractUntypedActor {
+
+    public AbstractUntypedActorWithMetering() {
+        if (isMetricsCaptureEnabled())
+            getContext().become(new MeteringBehavior(this));
+    }
+
+    private boolean isMetricsCaptureEnabled(){
+        CommonConfig config = new CommonConfig(getContext().system().settings().config());
+        return config.isMetricCaptureEnabled();
+    }
+}
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/CommonConfig.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/CommonConfig.java
new file mode 100644 (file)
index 0000000..0d139f9
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+
+import com.google.common.base.Preconditions;
+import com.typesafe.config.Config;
+import scala.concurrent.duration.Duration;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+public class CommonConfig extends AbstractConfig {
+
+    protected static final String TAG_ACTOR_SYSTEM_NAME = "actor-system-name";
+    protected static final String TAG_METRIC_CAPTURE_ENABLED = "metric-capture-enabled";
+    protected static final String TAG_MAILBOX_CAPACITY = "mailbox-capacity";
+    protected static final String TAG_MAILBOX = "bounded-mailbox";
+    protected static final String TAG_MAILBOX_PUSH_TIMEOUT = "mailbox-push-timeout-time";
+
+    //TODO: Ideally these defaults should go to reference.conf
+    // https://bugs.opendaylight.org/show_bug.cgi?id=1709
+    private static final int DEFAULT_MAILBOX_CAPACITY = 1000;
+    private static final int DEFAULT_MAILBOX_PUSH_TIMEOUT = 100;
+
+    //locally cached values
+    private FiniteDuration cachedMailBoxPushTimeout;
+    private Integer cachedMailBoxCapacity;
+    private Boolean cachedMetricCaptureEnableFlag;
+
+    public CommonConfig(Config config) {
+        super(config);
+    }
+
+    public String getActorSystemName() {
+        return get().getString(TAG_ACTOR_SYSTEM_NAME);
+    }
+
+    public boolean isMetricCaptureEnabled(){
+        if (cachedMetricCaptureEnableFlag != null){
+            return cachedMetricCaptureEnableFlag;
+        }
+
+        cachedMetricCaptureEnableFlag = get().hasPath(TAG_METRIC_CAPTURE_ENABLED)
+                ? get().getBoolean(TAG_METRIC_CAPTURE_ENABLED)
+                : false;
+
+        return cachedMetricCaptureEnableFlag;
+    }
+
+    public String getMailBoxName() {
+        return TAG_MAILBOX;
+    }
+
+    public Integer getMailBoxCapacity() {
+
+        if (cachedMailBoxCapacity != null) {
+            return cachedMailBoxCapacity;
+        }
+
+        final String PATH = new StringBuilder(TAG_MAILBOX).append(".").append(TAG_MAILBOX_CAPACITY).toString();
+        cachedMailBoxCapacity = get().hasPath(PATH)
+                ? get().getInt(PATH)
+                : DEFAULT_MAILBOX_CAPACITY;
+
+        return cachedMailBoxCapacity;
+    }
+
+    public FiniteDuration getMailBoxPushTimeout() {
+
+        if (cachedMailBoxPushTimeout != null) {
+            return cachedMailBoxPushTimeout;
+        }
+
+        final String PATH = new StringBuilder(TAG_MAILBOX).append(".").append(TAG_MAILBOX_PUSH_TIMEOUT).toString();
+
+        long timeout = get().hasPath(PATH)
+                ? get().getDuration(PATH, TimeUnit.NANOSECONDS)
+                : DEFAULT_MAILBOX_PUSH_TIMEOUT;
+
+        cachedMailBoxPushTimeout = new FiniteDuration(timeout, TimeUnit.NANOSECONDS);
+        return cachedMailBoxPushTimeout;
+    }
+
+    public static class Builder<T extends Builder> extends AbstractConfig.Builder<T>{
+
+        public Builder(String actorSystemName) {
+            super(actorSystemName);
+
+            //actor system config
+            configHolder.put(TAG_ACTOR_SYSTEM_NAME, actorSystemName);
+
+            //config for bounded mailbox
+            configHolder.put(TAG_MAILBOX, new HashMap<String, Object>());
+        }
+
+        public T metricCaptureEnabled(boolean enabled) {
+            configHolder.put(TAG_METRIC_CAPTURE_ENABLED, String.valueOf(enabled));
+            return (T)this;
+        }
+
+        public T mailboxCapacity(int capacity) {
+            Preconditions.checkArgument(capacity > 0, "mailbox capacity must be >0");
+
+            Map<String, Object> boundedMailbox = (Map) configHolder.get(TAG_MAILBOX);
+            boundedMailbox.put(TAG_MAILBOX_CAPACITY, capacity);
+            return (T)this;
+        }
+
+        public T mailboxPushTimeout(String timeout){
+            Duration pushTimeout = Duration.create(timeout);
+            Preconditions.checkArgument(pushTimeout.isFinite(), "invalid value for mailbox push timeout");
+
+            Map<String, Object> boundedMailbox = (Map) configHolder.get(TAG_MAILBOX);
+            boundedMailbox.put(TAG_MAILBOX_PUSH_TIMEOUT, timeout);
+            return (T)this;
+        }
+
+        public CommonConfig build() {
+            return new CommonConfig(merge());
+        }
+    }
+}
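
CommonConfig layers typed accessors (actor-system name, mailbox capacity, push timeout, metric-capture flag) over the configuration merged by AbstractConfig.Builder, with programmatic builder values taking precedence over the file. A usage sketch, assuming the classpath configuration contains a section named after the actor system (odl-cluster-example is a placeholder) and using the raw Builder type to keep the chaining short:

    import org.opendaylight.controller.cluster.common.actor.CommonConfig;

    public class CommonConfigSketch {
        public static void main(String[] args) {
            // Programmatic values take precedence; anything missing falls back to
            // ConfigFactory.load().getConfig("odl-cluster-example") via AbstractConfig.merge().
            CommonConfig config = new CommonConfig.Builder("odl-cluster-example")
                    .metricCaptureEnabled(true)
                    .mailboxCapacity(5000)
                    .mailboxPushTimeout("10ms")
                    .build();

            System.out.println(config.getActorSystemName());     // odl-cluster-example
            System.out.println(config.isMetricCaptureEnabled());  // true
            System.out.println(config.getMailBoxCapacity());      // 5000
        }
    }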
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/MeteredBoundedMailbox.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/MeteredBoundedMailbox.java
new file mode 100644 (file)
index 0000000..458f379
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.dispatch.BoundedDequeBasedMailbox;
+import akka.dispatch.MailboxType;
+import akka.dispatch.ProducesMessageQueue;
+import com.codahale.metrics.Gauge;
+import com.codahale.metrics.MetricRegistry;
+import com.typesafe.config.Config;
+import org.opendaylight.controller.cluster.reporting.MetricsReporter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.duration.FiniteDuration;
+
+public class MeteredBoundedMailbox implements MailboxType, ProducesMessageQueue<MeteredBoundedMailbox.MeteredMessageQueue> {
+
+    private final Logger LOG = LoggerFactory.getLogger(MeteredBoundedMailbox.class);
+
+    private MeteredMessageQueue queue;
+    private Integer capacity;
+    private FiniteDuration pushTimeOut;
+    private MetricRegistry registry;
+
+    private final String QUEUE_SIZE = "q-size";
+
+    public MeteredBoundedMailbox(ActorSystem.Settings settings, Config config) {
+
+        CommonConfig commonConfig = new CommonConfig(settings.config());
+        this.capacity = commonConfig.getMailBoxCapacity();
+        this.pushTimeOut = commonConfig.getMailBoxPushTimeout();
+
+        MetricsReporter reporter = MetricsReporter.getInstance();
+        registry = reporter.getMetricsRegistry();
+    }
+
+
+    @Override
+    public MeteredMessageQueue create(final scala.Option<ActorRef> owner, scala.Option<ActorSystem> system) {
+        this.queue = new MeteredMessageQueue(this.capacity, this.pushTimeOut);
+        monitorQueueSize(owner, this.queue);
+        return this.queue;
+    }
+
+    private void monitorQueueSize(scala.Option<ActorRef> owner, final MeteredMessageQueue monitoredQueue) {
+        if (owner.isEmpty()) {
+            return; //there's no actor to monitor
+        }
+        String actorName = owner.get().path().toStringWithoutAddress();
+        String metricName = registry.name(actorName, QUEUE_SIZE);
+
+        if (registry.getMetrics().containsKey(metricName))
+            return; //already registered
+
+        Gauge queueSize = getQueueSizeGauge(monitoredQueue);
+        registerQueueSizeMetric(metricName, queueSize);
+    }
+
+
+    public static class MeteredMessageQueue extends BoundedDequeBasedMailbox.MessageQueue {
+
+        public MeteredMessageQueue(int capacity, FiniteDuration pushTimeOut) {
+            super(capacity, pushTimeOut);
+        }
+    }
+
+    private Gauge getQueueSizeGauge(final MeteredMessageQueue monitoredQueue) {
+        return new Gauge<Integer>() {
+            @Override
+            public Integer getValue() {
+                return monitoredQueue.size();
+            }
+        };
+    }
+
+    private void registerQueueSizeMetric(String metricName, Gauge metric){
+        try {
+            registry.register(metricName,metric);
+        } catch (IllegalArgumentException e) {
+            LOG.warn("Unable to register queue size in metrics registry. Failed with exception {}", e);
+        }
+    }
+}
+
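
MeteredBoundedMailbox reads its capacity and push timeout through CommonConfig and publishes a per-actor q-size gauge in the shared metrics registry. For an actor to actually receive this mailbox, the ActorSystem configuration has to declare a mailbox id whose mailbox-type is this class (the tag bounded-mailbox matches CommonConfig.getMailBoxName()), and the actor's Props must reference that id. A deployment sketch; the actor class, system name, and config values are placeholders and the mailbox block is an assumption about how such a deployment would look:

    import akka.actor.ActorRef;
    import akka.actor.ActorSystem;
    import akka.actor.Props;
    import akka.actor.UntypedActor;

    public class MeteredMailboxDeploymentSketch {

        // Placeholder actor used only for this sketch.
        public static class Worker extends UntypedActor {
            @Override public void onReceive(Object message) { unhandled(message); }
        }

        public static void main(String[] args) {
            // Assumes application.conf contains something like:
            //   bounded-mailbox {
            //     mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
            //     mailbox-capacity = 1000
            //     mailbox-push-timeout-time = 100ms
            //   }
            ActorSystem system = ActorSystem.create("metered-mailbox-sketch");
            ActorRef worker = system.actorOf(
                    Props.create(Worker.class).withMailbox("bounded-mailbox"), "worker");
            System.out.println(worker.path());
            system.shutdown();
        }
    }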
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/MeteringBehavior.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/MeteringBehavior.java
new file mode 100644 (file)
index 0000000..d67d413
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import akka.actor.UntypedActor;
+import akka.japi.Procedure;
+import com.codahale.metrics.MetricRegistry;
+import com.codahale.metrics.Timer;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.reporting.MetricsReporter;
+
+/**
+ * Represents behaviour that can be exhibited by actors of type {@link akka.actor.UntypedActor}
+ * <p/>
+ * This behaviour meters the actor's default behaviour. It captures two metrics:
+ * <ul>
+ *     <li>message processing rate of actor's receive block</li>
+ *     <li>message processing rate by message type</li>
+ * </ul>
+ *
+ * The information is reported to {@link org.opendaylight.controller.cluster.reporting.MetricsReporter}
+ */
+public class MeteringBehavior implements Procedure<Object> {
+
+    private final UntypedActor meteredActor;
+
+    private final MetricRegistry METRICREGISTRY = MetricsReporter.getInstance().getMetricsRegistry();
+    private final String MSG_PROCESSING_RATE = "msg-rate";
+
+    private String actorName;
+    private Timer msgProcessingTimer;
+
+    /**
+     *
+     * @param actor whose behaviour needs to be metered
+     */
+    public MeteringBehavior(UntypedActor actor){
+        Preconditions.checkArgument(actor != null, "actor must not be null");
+
+        this.meteredActor = actor;
+        actorName = meteredActor.getSelf().path().toStringWithoutAddress();
+        final String msgProcessingTime = MetricRegistry.name(actorName, MSG_PROCESSING_RATE);
+        msgProcessingTimer = METRICREGISTRY.timer(msgProcessingTime);
+    }
+
+    /**
+     * Uses 2 timers to measure message processing rate. One for overall message processing rate and
+     * another to measure rate by message type. The timers are re-used if they were previously created.
+     * <p/>
+     * {@link com.codahale.metrics.MetricRegistry} maintains a reservoir for different timers where
+     * collected timings are kept. It exposes various metrics for each timer based on collected
+     * data. Eg: count of messages, 99, 95, 50... percentiles, max, mean etc.
+     * <p/>
+ * These metrics are exposed as JMX beans.
+     *
+     * @see <a href="http://dropwizard.github.io/metrics/manual/core/#timers">
+     *     http://dropwizard.github.io/metrics/manual/core/#timers</a>
+     *
+     * @param message the message to be handled by the metered actor
+     * @throws Exception
+     */
+    @Override
+    public void apply(Object message) throws Exception {
+        final String messageType = message.getClass().getSimpleName();
+
+        final String msgProcessingTimeByMsgType =
+                MetricRegistry.name(actorName, MSG_PROCESSING_RATE, messageType);
+
+        final Timer msgProcessingTimerByMsgType = METRICREGISTRY.timer(msgProcessingTimeByMsgType);
+
+        //start timers
+        final Timer.Context context = msgProcessingTimer.time();
+        final Timer.Context contextByMsgType = msgProcessingTimerByMsgType.time();
+
+        meteredActor.onReceive(message);
+
+        //stop timers
+        contextByMsgType.stop();
+        context.stop();
+    }
+}
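
MeteringBehavior is a Procedure<Object>, so an actor opts into metering by replacing its receive behaviour with a metering wrapper around itself; AbstractUntypedActorWithMetering above does exactly this when metric capture is enabled. Sketched for an arbitrary UntypedActor (MeteredWorker is a placeholder):

    import akka.actor.UntypedActor;
    import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;

    public class MeteredWorker extends UntypedActor {

        public MeteredWorker() {
            // All messages now flow through MeteringBehavior.apply(), which times the
            // call and then delegates back to onReceive().
            getContext().become(new MeteringBehavior(this));
        }

        @Override
        public void onReceive(Object message) throws Exception {
            // real message handling goes here
            unhandled(message);
        }
    }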
@@ -6,7 +6,7 @@
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
 
-package org.opendaylight.controller.remote.rpc.messages;
+package org.opendaylight.controller.cluster.common.actor;
 
 import akka.actor.ActorRef;
 
@@ -14,7 +14,6 @@ public class Monitor {
     private final ActorRef actorRef;
 
     public Monitor(ActorRef actorRef){
-
         this.actorRef = actorRef;
     }
 
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/UnifiedConfig.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/UnifiedConfig.java
new file mode 100644 (file)
index 0000000..62b6055
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.common.actor;
+
+import com.typesafe.config.Config;
+
+/**
+ * Represents a unified view of configuration.
+ * <p/>
+ * It merges configuration from:
+ * <ul>
+ *     <li>Config subsystem</li>
+ *     <li>Akka configuration files</li>
+ * </ul>
+ *
+ * Configurations defined in the config subsystem take precedence.
+ */
+public interface UnifiedConfig {
+
+    /**
+     * Returns an immutable instance of the unified configuration.
+     * @return the merged {@link Config}
+     */
+    public Config get();
+}
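
UnifiedConfig promises that values coming from the config subsystem win over values from the Akka configuration files; AbstractConfig.merge() implements this with Typesafe Config's withFallback. A small self-contained sketch of that precedence rule (the keys are only illustrative):

    import com.typesafe.config.Config;
    import com.typesafe.config.ConfigFactory;

    import java.util.Collections;

    public class ConfigPrecedenceSketch {
        public static void main(String[] args) {
            // Stands in for values pushed from the config subsystem.
            Config subsystem = ConfigFactory.parseMap(
                    Collections.<String, Object>singletonMap("metric-capture-enabled", true));

            // Stands in for the Akka configuration file.
            Config akkaFile = ConfigFactory.parseString(
                    "metric-capture-enabled = false\n" +
                    "mailbox-capacity = 1000");

            Config unified = subsystem.withFallback(akkaFile);

            System.out.println(unified.getBoolean("metric-capture-enabled")); // true  (subsystem wins)
            System.out.println(unified.getInt("mailbox-capacity"));           // 1000 (falls back to the file)
        }
    }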
@@ -6,13 +6,12 @@
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
 
-package org.opendaylight.controller.cluster.datastore;
+package org.opendaylight.controller.cluster.raft.protobuff.client.messages;
 
 import com.google.common.base.Preconditions;
 import com.google.protobuf.GeneratedMessage;
 import com.google.protobuf.InvalidProtocolBufferException;
 import com.google.protobuf.UnknownFieldSet;
-import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
 import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
 import org.opendaylight.controller.protobuff.messages.persistent.PersistentMessages;
 
@@ -5,7 +5,7 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.common.reporting;
+package org.opendaylight.controller.cluster.reporting;
 
 import com.codahale.metrics.JmxReporter;
 import com.codahale.metrics.MetricRegistry;
@@ -21,7 +21,7 @@ import com.codahale.metrics.MetricRegistry;
 public class MetricsReporter implements AutoCloseable{
 
     private final MetricRegistry METRICS_REGISTRY = new MetricRegistry();
-    private final String DOMAIN = "org.opendaylight.controller";
+    private final String DOMAIN = "org.opendaylight.controller.actor.metric";
 
     public final JmxReporter jmxReporter = JmxReporter.forRegistry(METRICS_REGISTRY).inDomain(DOMAIN).build();
 
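
MetricsReporter keeps a single MetricRegistry and a JmxReporter scoped to the org.opendaylight.controller.actor.metric domain; the mailbox queue-size gauges and message-rate timers above all land in that registry. Additional metrics can be registered on the same instance, as in this sketch (the metric name is a placeholder, and JMX visibility assumes the JmxReporter has been started):

    import com.codahale.metrics.Meter;
    import com.codahale.metrics.MetricRegistry;
    import org.opendaylight.controller.cluster.reporting.MetricsReporter;

    public class CustomMetricSketch {
        public static void main(String[] args) {
            // Shared registry: the same one the mailbox queue-size gauges and
            // message-rate timers are registered with.
            MetricRegistry registry = MetricsReporter.getInstance().getMetricsRegistry();

            // Hypothetical metric registered alongside the actor metrics.
            Meter sampleMeter = registry.meter(MetricRegistry.name("sketch", "sample-rate"));
            sampleMeter.mark();

            System.out.println(sampleMeter.getCount()); // 1
        }
    }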
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/common/actor/MeteredBoundedMailbox.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/common/actor/MeteredBoundedMailbox.java
deleted file mode 100644 (file)
index c6d3625..0000000
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.common.actor;
-
-import akka.actor.ActorPath;
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.dispatch.BoundedDequeBasedMailbox;
-import akka.dispatch.MailboxType;
-import akka.dispatch.ProducesMessageQueue;
-import com.codahale.metrics.Gauge;
-import com.codahale.metrics.MetricRegistry;
-import com.google.common.base.Preconditions;
-import com.typesafe.config.Config;
-import org.opendaylight.controller.common.reporting.MetricsReporter;
-import scala.concurrent.duration.FiniteDuration;
-
-import java.util.concurrent.TimeUnit;
-
-public class MeteredBoundedMailbox implements MailboxType, ProducesMessageQueue<MeteredBoundedMailbox.MeteredMessageQueue> {
-
-    private MeteredMessageQueue queue;
-    private Integer capacity;
-    private FiniteDuration pushTimeOut;
-    private ActorPath actorPath;
-    private MetricsReporter reporter;
-
-    private final String QUEUE_SIZE = "queue-size";
-    private final String CAPACITY = "mailbox-capacity";
-    private final String TIMEOUT  = "mailbox-push-timeout-time";
-    private final Long DEFAULT_TIMEOUT = 10L;
-
-    public MeteredBoundedMailbox(ActorSystem.Settings settings, Config config) {
-        Preconditions.checkArgument( config.hasPath("mailbox-capacity"), "Missing configuration [mailbox-capacity]" );
-        this.capacity = config.getInt(CAPACITY);
-        Preconditions.checkArgument( this.capacity > 0, "mailbox-capacity must be > 0");
-
-        Long timeout = -1L;
-        if ( config.hasPath(TIMEOUT) ){
-            timeout = config.getDuration(TIMEOUT, TimeUnit.NANOSECONDS);
-        } else {
-            timeout = DEFAULT_TIMEOUT;
-        }
-        Preconditions.checkArgument( timeout > 0, "mailbox-push-timeout-time must be > 0");
-        this.pushTimeOut = new FiniteDuration(timeout, TimeUnit.NANOSECONDS);
-
-        reporter = MetricsReporter.getInstance();
-    }
-
-
-    @Override
-    public MeteredMessageQueue create(final scala.Option<ActorRef> owner, scala.Option<ActorSystem> system) {
-        this.queue = new MeteredMessageQueue(this.capacity, this.pushTimeOut);
-        monitorQueueSize(owner, this.queue);
-        return this.queue;
-    }
-
-    private void monitorQueueSize(scala.Option<ActorRef> owner, final MeteredMessageQueue monitoredQueue) {
-        if (owner.isEmpty()) {
-            return; //there's no actor to monitor
-        }
-        actorPath = owner.get().path();
-        String actorInstanceId = Integer.toString(owner.get().hashCode());
-
-        MetricRegistry registry = reporter.getMetricsRegistry();
-        String actorName = registry.name(actorPath.toString(), actorInstanceId, QUEUE_SIZE);
-
-        if (registry.getMetrics().containsKey(actorName))
-            return; //already registered
-
-        registry.register(actorName,
-                new Gauge<Integer>() {
-                    @Override
-                    public Integer getValue() {
-                        return monitoredQueue.size();
-                    }
-                });
-    }
-
-
-    public static class MeteredMessageQueue extends BoundedDequeBasedMailbox.MessageQueue {
-
-        public MeteredMessageQueue(int capacity, FiniteDuration pushTimeOut) {
-            super(capacity, pushTimeOut);
-        }
-    }
-
-}
-
@@ -1,7 +1,7 @@
 // Generated by the protocol buffer compiler.  DO NOT EDIT!
 // source: InstallSnapshot.proto
 
-package org.opendaylight.controller.cluster.raft.protobuff.messages;
+package org.opendaylight.controller.protobuff.messages.cluster.raft;
 
 public final class InstallSnapshotMessages {
   private InstallSnapshotMessages() {}
@@ -186,14 +186,14 @@ public final class InstallSnapshotMessages {
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+      return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
     }
 
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
+      return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.Builder.class);
+              org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.Builder.class);
     }
 
     public static com.google.protobuf.Parser<InstallSnapshot> PARSER =
@@ -245,7 +245,7 @@ public final class InstallSnapshotMessages {
       if (ref instanceof java.lang.String) {
         return (java.lang.String) ref;
       } else {
-        com.google.protobuf.ByteString bs = 
+        com.google.protobuf.ByteString bs =
             (com.google.protobuf.ByteString) ref;
         java.lang.String s = bs.toStringUtf8();
         if (bs.isValidUtf8()) {
@@ -261,7 +261,7 @@ public final class InstallSnapshotMessages {
         getLeaderIdBytes() {
       java.lang.Object ref = leaderId_;
       if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b = 
+        com.google.protobuf.ByteString b =
             com.google.protobuf.ByteString.copyFromUtf8(
                 (java.lang.String) ref);
         leaderId_ = b;
@@ -442,53 +442,53 @@ public final class InstallSnapshotMessages {
       return super.writeReplace();
     }
 
-    public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+    public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
         com.google.protobuf.ByteString data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+    public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
         com.google.protobuf.ByteString data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(byte[] data)
+    public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(byte[] data)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+    public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
         byte[] data,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(java.io.InputStream input)
+    public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+    public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseFrom(input, extensionRegistry);
     }
-    public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(java.io.InputStream input)
+    public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input);
     }
-    public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(
+    public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseDelimitedFrom(
         java.io.InputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+    public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
         com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
       return PARSER.parseFrom(input);
     }
-    public static org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parseFrom(
+    public static org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parseFrom(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -497,7 +497,7 @@ public final class InstallSnapshotMessages {
 
     public static Builder newBuilder() { return Builder.create(); }
     public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot prototype) {
+    public static Builder newBuilder(org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot prototype) {
       return newBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() { return newBuilder(this); }
@@ -513,20 +513,20 @@ public final class InstallSnapshotMessages {
      */
     public static final class Builder extends
         com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshotOrBuilder {
+       implements org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshotOrBuilder {
       public static final com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+        return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
       }
 
       protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
+        return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-                org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.Builder.class);
+                org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.class, org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.Builder.class);
       }
 
-      // Construct using org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.newBuilder()
+      // Construct using org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -569,23 +569,23 @@ public final class InstallSnapshotMessages {
 
       public com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
+        return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.internal_static_org_opendaylight_controller_cluster_raft_InstallSnapshot_descriptor;
       }
 
-      public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot getDefaultInstanceForType() {
-        return org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance();
+      public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot getDefaultInstanceForType() {
+        return org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance();
       }
 
-      public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot build() {
-        org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot result = buildPartial();
+      public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot build() {
+        org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot buildPartial() {
-        org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot result = new org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot(this);
+      public org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot buildPartial() {
+        org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot result = new org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot(this);
         int from_bitField0_ = bitField0_;
         int to_bitField0_ = 0;
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
@@ -622,16 +622,16 @@ public final class InstallSnapshotMessages {
       }
 
       public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot) {
-          return mergeFrom((org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot)other);
+        if (other instanceof org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot) {
+          return mergeFrom((org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot other) {
-        if (other == org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance()) return this;
+      public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot other) {
+        if (other == org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot.getDefaultInstance()) return this;
         if (other.hasTerm()) {
           setTerm(other.getTerm());
         }
@@ -667,11 +667,11 @@ public final class InstallSnapshotMessages {
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot parsedMessage = null;
+        org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.opendaylight.controller.cluster.raft.protobuff.messages.InstallSnapshotMessages.InstallSnapshot) e.getUnfinishedMessage();
+          parsedMessage = (org.opendaylight.controller.protobuff.messages.cluster.raft.InstallSnapshotMessages.InstallSnapshot) e.getUnfinishedMessage();
           throw e;
         } finally {
           if (parsedMessage != null) {
@@ -744,7 +744,7 @@ public final class InstallSnapshotMessages {
           getLeaderIdBytes() {
         java.lang.Object ref = leaderId_;
         if (ref instanceof String) {
-          com.google.protobuf.ByteString b = 
+          com.google.protobuf.ByteString b =
               com.google.protobuf.ByteString.copyFromUtf8(
                   (java.lang.String) ref);
           leaderId_ = b;
@@ -988,8 +988,8 @@ public final class InstallSnapshotMessages {
       "\021lastIncludedIndex\030\003 \001(\003\022\030\n\020lastIncluded" +
       "Term\030\004 \001(\003\022\014\n\004data\030\005 \001(\014\022\022\n\nchunkIndex\030\006" +
       " \001(\005\022\023\n\013totalChunks\030\007 \001(\005BX\n;org.openday" +
-      "light.controller.cluster.raft.protobuff." +
-      "messagesB\027InstallSnapshotMessagesH\001"
+      "light.controller.protobuff.messages.clus" +
+      "ter.raftB\027InstallSnapshotMessagesH\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
index 63dd5e7081603aed89168980a5f6ad6bb27de28e..d956bb174be4e159bc3fe0b415126ea39a4ba140 100644 (file)
@@ -10,6 +10,21 @@ public final class ShardTransactionChainMessages {
   }
   public interface CloseTransactionChainOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
+
+    // optional string transactionChainId = 1;
+    /**
+     * <code>optional string transactionChainId = 1;</code>
+     */
+    boolean hasTransactionChainId();
+    /**
+     * <code>optional string transactionChainId = 1;</code>
+     */
+    java.lang.String getTransactionChainId();
+    /**
+     * <code>optional string transactionChainId = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getTransactionChainIdBytes();
   }
   /**
    * Protobuf type {@code org.opendaylight.controller.mdsal.CloseTransactionChain}
@@ -44,6 +59,7 @@ public final class ShardTransactionChainMessages {
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
       initFields();
+      int mutable_bitField0_ = 0;
       com.google.protobuf.UnknownFieldSet.Builder unknownFields =
           com.google.protobuf.UnknownFieldSet.newBuilder();
       try {
@@ -61,6 +77,11 @@ public final class ShardTransactionChainMessages {
               }
               break;
             }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              transactionChainId_ = input.readBytes();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -100,7 +121,52 @@ public final class ShardTransactionChainMessages {
       return PARSER;
     }
 
+    private int bitField0_;
+    // optional string transactionChainId = 1;
+    public static final int TRANSACTIONCHAINID_FIELD_NUMBER = 1;
+    private java.lang.Object transactionChainId_;
+    /**
+     * <code>optional string transactionChainId = 1;</code>
+     */
+    public boolean hasTransactionChainId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional string transactionChainId = 1;</code>
+     */
+    public java.lang.String getTransactionChainId() {
+      java.lang.Object ref = transactionChainId_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs =
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          transactionChainId_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>optional string transactionChainId = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getTransactionChainIdBytes() {
+      java.lang.Object ref = transactionChainId_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        transactionChainId_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
     private void initFields() {
+      transactionChainId_ = "";
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -114,6 +180,9 @@ public final class ShardTransactionChainMessages {
     public void writeTo(com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
       getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getTransactionChainIdBytes());
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -123,6 +192,10 @@ public final class ShardTransactionChainMessages {
       if (size != -1) return size;
 
       size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getTransactionChainIdBytes());
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -239,6 +312,8 @@ public final class ShardTransactionChainMessages {
 
       public Builder clear() {
         super.clear();
+        transactionChainId_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
 
@@ -265,6 +340,13 @@ public final class ShardTransactionChainMessages {
 
       public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CloseTransactionChain buildPartial() {
         org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CloseTransactionChain result = new org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CloseTransactionChain(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.transactionChainId_ = transactionChainId_;
+        result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
       }
@@ -280,6 +362,11 @@ public final class ShardTransactionChainMessages {
 
       public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CloseTransactionChain other) {
         if (other == org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CloseTransactionChain.getDefaultInstance()) return this;
+        if (other.hasTransactionChainId()) {
+          bitField0_ |= 0x00000001;
+          transactionChainId_ = other.transactionChainId_;
+          onChanged();
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -305,6 +392,81 @@ public final class ShardTransactionChainMessages {
         }
         return this;
       }
+      private int bitField0_;
+
+      // optional string transactionChainId = 1;
+      private java.lang.Object transactionChainId_ = "";
+      /**
+       * <code>optional string transactionChainId = 1;</code>
+       */
+      public boolean hasTransactionChainId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional string transactionChainId = 1;</code>
+       */
+      public java.lang.String getTransactionChainId() {
+        java.lang.Object ref = transactionChainId_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          transactionChainId_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>optional string transactionChainId = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getTransactionChainIdBytes() {
+        java.lang.Object ref = transactionChainId_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b =
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          transactionChainId_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>optional string transactionChainId = 1;</code>
+       */
+      public Builder setTransactionChainId(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        transactionChainId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string transactionChainId = 1;</code>
+       */
+      public Builder clearTransactionChainId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        transactionChainId_ = getDefaultInstance().getTransactionChainId();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string transactionChainId = 1;</code>
+       */
+      public Builder setTransactionChainIdBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        transactionChainId_ = value;
+        onChanged();
+        return this;
+      }
 
       // @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.CloseTransactionChain)
     }
@@ -1444,13 +1606,14 @@ public final class ShardTransactionChainMessages {
   static {
     java.lang.String[] descriptorData = {
       "\n\033ShardTransactionChain.proto\022!org.opend" +
-      "aylight.controller.mdsal\"\027\n\025CloseTransac" +
-      "tionChain\"\034\n\032CloseTransactionChainReply\"" +
-      "\030\n\026CreateTransactionChain\";\n\033CreateTrans" +
-      "actionChainReply\022\034\n\024transactionChainPath" +
-      "\030\001 \002(\tB[\n:org.opendaylight.controller.pr" +
-      "otobuff.messages.transactionB\035ShardTrans" +
-      "actionChainMessages"
+      "aylight.controller.mdsal\"3\n\025CloseTransac" +
+      "tionChain\022\032\n\022transactionChainId\030\001 \001(\t\"\034\n" +
+      "\032CloseTransactionChainReply\"\030\n\026CreateTra" +
+      "nsactionChain\";\n\033CreateTransactionChainR" +
+      "eply\022\034\n\024transactionChainPath\030\001 \002(\tB[\n:or" +
+      "g.opendaylight.controller.protobuff.mess" +
+      "ages.transactionB\035ShardTransactionChainM" +
+      "essages"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -1462,7 +1625,7 @@ public final class ShardTransactionChainMessages {
           internal_static_org_opendaylight_controller_mdsal_CloseTransactionChain_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_opendaylight_controller_mdsal_CloseTransactionChain_descriptor,
-              new java.lang.String[] { });
+              new java.lang.String[] { "TransactionChainId", });
           internal_static_org_opendaylight_controller_mdsal_CloseTransactionChainReply_descriptor =
             getDescriptor().getMessageTypes().get(1);
           internal_static_org_opendaylight_controller_mdsal_CloseTransactionChainReply_fieldAccessorTable = new
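A minimal usage sketch for the regenerated CloseTransactionChain message, assuming the standard protobuf-generated newBuilder() entry point; the chain id literal is illustrative only, not taken from this change.

    // Hedged sketch: populate the new optional transactionChainId field (field 1).
    org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CloseTransactionChain closeChain =
        org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages.CloseTransactionChain.newBuilder()
            .setTransactionChainId("chain-1") // illustrative value
            .build();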
index ded80713fb3feba577065da17410d3408786046e..96a39bddd376a9a777be0c056693cae8a892925d 100644 (file)
@@ -653,6 +653,21 @@ public final class ShardTransactionMessages {
      * <code>required int32 transactionType = 2;</code>
      */
     int getTransactionType();
+
+    // optional string transactionChainId = 3;
+    /**
+     * <code>optional string transactionChainId = 3;</code>
+     */
+    boolean hasTransactionChainId();
+    /**
+     * <code>optional string transactionChainId = 3;</code>
+     */
+    java.lang.String getTransactionChainId();
+    /**
+     * <code>optional string transactionChainId = 3;</code>
+     */
+    com.google.protobuf.ByteString
+        getTransactionChainIdBytes();
   }
   /**
    * Protobuf type {@code org.opendaylight.controller.mdsal.CreateTransaction}
@@ -715,6 +730,11 @@ public final class ShardTransactionMessages {
               transactionType_ = input.readInt32();
               break;
             }
+            case 26: {
+              bitField0_ |= 0x00000004;
+              transactionChainId_ = input.readBytes();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -814,9 +834,53 @@ public final class ShardTransactionMessages {
       return transactionType_;
     }
 
+    // optional string transactionChainId = 3;
+    public static final int TRANSACTIONCHAINID_FIELD_NUMBER = 3;
+    private java.lang.Object transactionChainId_;
+    /**
+     * <code>optional string transactionChainId = 3;</code>
+     */
+    public boolean hasTransactionChainId() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>optional string transactionChainId = 3;</code>
+     */
+    public java.lang.String getTransactionChainId() {
+      java.lang.Object ref = transactionChainId_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs =
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          transactionChainId_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>optional string transactionChainId = 3;</code>
+     */
+    public com.google.protobuf.ByteString
+        getTransactionChainIdBytes() {
+      java.lang.Object ref = transactionChainId_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b =
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        transactionChainId_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
     private void initFields() {
       transactionId_ = "";
       transactionType_ = 0;
+      transactionChainId_ = "";
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -844,6 +908,9 @@ public final class ShardTransactionMessages {
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeInt32(2, transactionType_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeBytes(3, getTransactionChainIdBytes());
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -861,6 +928,10 @@ public final class ShardTransactionMessages {
         size += com.google.protobuf.CodedOutputStream
           .computeInt32Size(2, transactionType_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(3, getTransactionChainIdBytes());
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -981,6 +1052,8 @@ public final class ShardTransactionMessages {
         bitField0_ = (bitField0_ & ~0x00000001);
         transactionType_ = 0;
         bitField0_ = (bitField0_ & ~0x00000002);
+        transactionChainId_ = "";
+        bitField0_ = (bitField0_ & ~0x00000004);
         return this;
       }
 
@@ -1017,6 +1090,10 @@ public final class ShardTransactionMessages {
           to_bitField0_ |= 0x00000002;
         }
         result.transactionType_ = transactionType_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.transactionChainId_ = transactionChainId_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -1041,6 +1118,11 @@ public final class ShardTransactionMessages {
         if (other.hasTransactionType()) {
           setTransactionType(other.getTransactionType());
         }
+        if (other.hasTransactionChainId()) {
+          bitField0_ |= 0x00000004;
+          transactionChainId_ = other.transactionChainId_;
+          onChanged();
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -1183,6 +1265,80 @@ public final class ShardTransactionMessages {
         return this;
       }
 
+      // optional string transactionChainId = 3;
+      private java.lang.Object transactionChainId_ = "";
+      /**
+       * <code>optional string transactionChainId = 3;</code>
+       */
+      public boolean hasTransactionChainId() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>optional string transactionChainId = 3;</code>
+       */
+      public java.lang.String getTransactionChainId() {
+        java.lang.Object ref = transactionChainId_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          transactionChainId_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>optional string transactionChainId = 3;</code>
+       */
+      public com.google.protobuf.ByteString
+          getTransactionChainIdBytes() {
+        java.lang.Object ref = transactionChainId_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b =
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          transactionChainId_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>optional string transactionChainId = 3;</code>
+       */
+      public Builder setTransactionChainId(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000004;
+        transactionChainId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string transactionChainId = 3;</code>
+       */
+      public Builder clearTransactionChainId() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        transactionChainId_ = getDefaultInstance().getTransactionChainId();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string transactionChainId = 3;</code>
+       */
+      public Builder setTransactionChainIdBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000004;
+        transactionChainId_ = value;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.CreateTransaction)
     }
 
@@ -7597,36 +7753,37 @@ public final class ShardTransactionMessages {
     java.lang.String[] descriptorData = {
       "\n\026ShardTransaction.proto\022!org.opendaylig" +
       "ht.controller.mdsal\032\014Common.proto\"\022\n\020Clo" +
-      "seTransaction\"\027\n\025CloseTransactionReply\"C" +
+      "seTransaction\"\027\n\025CloseTransactionReply\"_" +
       "\n\021CreateTransaction\022\025\n\rtransactionId\030\001 \002" +
-      "(\t\022\027\n\017transactionType\030\002 \002(\005\"M\n\026CreateTra" +
-      "nsactionReply\022\034\n\024transactionActorPath\030\001 " +
-      "\002(\t\022\025\n\rtransactionId\030\002 \002(\t\"\022\n\020ReadyTrans" +
-      "action\"*\n\025ReadyTransactionReply\022\021\n\tactor" +
-      "Path\030\001 \002(\t\"l\n\nDeleteData\022^\n\037instanceIden" +
-      "tifierPathArguments\030\001 \002(\01325.org.opendayl",
-      "ight.controller.mdsal.InstanceIdentifier" +
-      "\"\021\n\017DeleteDataReply\"j\n\010ReadData\022^\n\037insta" +
-      "nceIdentifierPathArguments\030\001 \002(\01325.org.o" +
-      "pendaylight.controller.mdsal.InstanceIde" +
-      "ntifier\"P\n\rReadDataReply\022?\n\016normalizedNo" +
-      "de\030\001 \001(\0132\'.org.opendaylight.controller.m" +
-      "dsal.Node\"\254\001\n\tWriteData\022^\n\037instanceIdent" +
-      "ifierPathArguments\030\001 \002(\01325.org.opendayli" +
-      "ght.controller.mdsal.InstanceIdentifier\022" +
-      "?\n\016normalizedNode\030\002 \002(\0132\'.org.opendaylig",
-      "ht.controller.mdsal.Node\"\020\n\016WriteDataRep" +
-      "ly\"\254\001\n\tMergeData\022^\n\037instanceIdentifierPa" +
-      "thArguments\030\001 \002(\01325.org.opendaylight.con" +
-      "troller.mdsal.InstanceIdentifier\022?\n\016norm" +
-      "alizedNode\030\002 \002(\0132\'.org.opendaylight.cont" +
-      "roller.mdsal.Node\"\020\n\016MergeDataReply\"l\n\nD" +
-      "ataExists\022^\n\037instanceIdentifierPathArgum" +
-      "ents\030\001 \002(\01325.org.opendaylight.controller" +
-      ".mdsal.InstanceIdentifier\"!\n\017DataExistsR" +
-      "eply\022\016\n\006exists\030\001 \002(\010BV\n:org.opendaylight",
-      ".controller.protobuff.messages.transacti" +
-      "onB\030ShardTransactionMessages"
+      "(\t\022\027\n\017transactionType\030\002 \002(\005\022\032\n\022transacti" +
+      "onChainId\030\003 \001(\t\"M\n\026CreateTransactionRepl" +
+      "y\022\034\n\024transactionActorPath\030\001 \002(\t\022\025\n\rtrans" +
+      "actionId\030\002 \002(\t\"\022\n\020ReadyTransaction\"*\n\025Re" +
+      "adyTransactionReply\022\021\n\tactorPath\030\001 \002(\t\"l" +
+      "\n\nDeleteData\022^\n\037instanceIdentifierPathAr",
+      "guments\030\001 \002(\01325.org.opendaylight.control" +
+      "ler.mdsal.InstanceIdentifier\"\021\n\017DeleteDa" +
+      "taReply\"j\n\010ReadData\022^\n\037instanceIdentifie" +
+      "rPathArguments\030\001 \002(\01325.org.opendaylight." +
+      "controller.mdsal.InstanceIdentifier\"P\n\rR" +
+      "eadDataReply\022?\n\016normalizedNode\030\001 \001(\0132\'.o" +
+      "rg.opendaylight.controller.mdsal.Node\"\254\001" +
+      "\n\tWriteData\022^\n\037instanceIdentifierPathArg" +
+      "uments\030\001 \002(\01325.org.opendaylight.controll" +
+      "er.mdsal.InstanceIdentifier\022?\n\016normalize",
+      "dNode\030\002 \002(\0132\'.org.opendaylight.controlle" +
+      "r.mdsal.Node\"\020\n\016WriteDataReply\"\254\001\n\tMerge" +
+      "Data\022^\n\037instanceIdentifierPathArguments\030" +
+      "\001 \002(\01325.org.opendaylight.controller.mdsa" +
+      "l.InstanceIdentifier\022?\n\016normalizedNode\030\002" +
+      " \002(\0132\'.org.opendaylight.controller.mdsal" +
+      ".Node\"\020\n\016MergeDataReply\"l\n\nDataExists\022^\n" +
+      "\037instanceIdentifierPathArguments\030\001 \002(\01325" +
+      ".org.opendaylight.controller.mdsal.Insta" +
+      "nceIdentifier\"!\n\017DataExistsReply\022\016\n\006exis",
+      "ts\030\001 \002(\010BV\n:org.opendaylight.controller." +
+      "protobuff.messages.transactionB\030ShardTra" +
+      "nsactionMessages"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -7650,7 +7807,7 @@ public final class ShardTransactionMessages {
           internal_static_org_opendaylight_controller_mdsal_CreateTransaction_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_opendaylight_controller_mdsal_CreateTransaction_descriptor,
-              new java.lang.String[] { "TransactionId", "TransactionType", });
+              new java.lang.String[] { "TransactionId", "TransactionType", "TransactionChainId", });
           internal_static_org_opendaylight_controller_mdsal_CreateTransactionReply_descriptor =
             getDescriptor().getMessageTypes().get(3);
           internal_static_org_opendaylight_controller_mdsal_CreateTransactionReply_fieldAccessorTable = new
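A corresponding sketch for CreateTransaction, assuming the standard generated setters for the two required fields; all literal values are illustrative.

    // Hedged sketch: the new optional transactionChainId (field 3) rides along with the
    // existing required transactionId (field 1) and transactionType (field 2).
    org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransaction request =
        org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransaction.newBuilder()
            .setTransactionId("txn-1")        // assumed generated setter for required field 1
            .setTransactionType(0)            // assumed generated setter for required field 2
            .setTransactionChainId("chain-1") // new optional field 3
            .build();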
index c9d5e89ae1079c51249acaaeaf61fe27e2b7577c..0f93f43c566639641c32a3a1472d0c84ae8e513f 100644 (file)
@@ -100,7 +100,9 @@ public class XmlStreamUtils {
     for (Entry<URI, String> e: prefixes.getPrefixes()) {
       writer.writeNamespace(e.getValue(), e.getKey().toString());
     }
-    LOG.debug("Instance identifier with Random prefix is now {}", str);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Instance identifier with Random prefix is now {}", str);
+    }
     writer.writeCharacters(str);
   }
 
@@ -169,7 +171,7 @@ public class XmlStreamUtils {
         DataSchemaNode childSchema = null;
         if (schema instanceof DataNodeContainer) {
           childSchema = SchemaUtils.findFirstSchema(child.getNodeType(), ((DataNodeContainer) schema).getChildNodes()).orNull();
-          if (childSchema == null) {
+          if (childSchema == null && LOG.isDebugEnabled()) {
             LOG.debug("Probably the data node \"{}\" does not conform to schema", child == null ? "" : child.getNodeType().getLocalName());
           }
         }
@@ -192,7 +194,9 @@ public class XmlStreamUtils {
    */
   public void writeValue(final @Nonnull XMLStreamWriter writer, final @Nonnull TypeDefinition<?> type, final Object value) throws XMLStreamException {
     if (value == null) {
-      LOG.debug("Value of {}:{} is null, not encoding it", type.getQName().getNamespace(), type.getQName().getLocalName());
+      if(LOG.isDebugEnabled()){
+        LOG.debug("Value of {}:{} is null, not encoding it", type.getQName().getNamespace(), type.getQName().getLocalName());
+      }
       return;
     }
 
@@ -232,18 +236,24 @@ public class XmlStreamUtils {
       writer.writeNamespace(prefix, qname.getNamespace().toString());
       writer.writeCharacters(prefix + ':' + qname.getLocalName());
     } else {
-      LOG.debug("Value of {}:{} is not a QName but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
+      if(LOG.isDebugEnabled()) {
+        LOG.debug("Value of {}:{} is not a QName but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
+      }
       writer.writeCharacters(String.valueOf(value));
     }
   }
 
   private static void write(final @Nonnull XMLStreamWriter writer, final @Nonnull InstanceIdentifierTypeDefinition type, final @Nonnull Object value) throws XMLStreamException {
     if (value instanceof YangInstanceIdentifier) {
-      LOG.debug("Writing InstanceIdentifier object {}", value);
+      if(LOG.isDebugEnabled()) {
+          LOG.debug("Writing InstanceIdentifier object {}", value);
+      }
       write(writer, (YangInstanceIdentifier)value);
     } else {
-      LOG.debug("Value of {}:{} is not an InstanceIdentifier but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
-      writer.writeCharacters(String.valueOf(value));
+      if(LOG.isDebugEnabled()) {
+          LOG.debug("Value of {}:{} is not an InstanceIdentifier but {}", type.getQName().getNamespace(), type.getQName().getLocalName(), value.getClass());
+      }
+      writer.writeCharacters(String.valueOf(value));
     }
   }
 }
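The changes above all follow the same pattern: wrap LOG.debug in a LOG.isDebugEnabled() guard so that argument evaluation (String.valueOf(value), writer.toString(), and similar) is skipped when debug logging is disabled. A minimal, self-contained sketch of the pattern, using a hypothetical class and value that are not part of this change:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class DebugGuardExample { // hypothetical example class
        private static final Logger LOG = LoggerFactory.getLogger(DebugGuardExample.class);

        void log(Object value) {
            // Without the guard, String.valueOf(value) is computed even when debug is off.
            if (LOG.isDebugEnabled()) {
                LOG.debug("Value is now {}", String.valueOf(value));
            }
        }
    }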
index ea8f4a3ef19810a6d95ebc4211b0f4569b6e2716..d0cc2adb5f06e1a61859e57b6b086857e36d3f22 100644 (file)
@@ -74,7 +74,9 @@ public class XmlUtils {
    * @return xml String
    */
   public static String inputCompositeNodeToXml(CompositeNode cNode, SchemaContext schemaContext){
-    LOG.debug("Converting input composite node to xml {}", cNode);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Converting input composite node to xml {}", cNode);
+    }
     if (cNode == null) {
         return BLANK;
     }
@@ -88,12 +90,14 @@ public class XmlUtils {
       Set<RpcDefinition> rpcs =  schemaContext.getOperations();
       for(RpcDefinition rpc : rpcs) {
         if(rpc.getQName().equals(cNode.getNodeType())){
-          LOG.debug("Found the rpc definition from schema context matching with input composite node  {}", rpc.getQName());
-
+          if(LOG.isDebugEnabled()) {
+              LOG.debug("Found the rpc definition from schema context matching with input composite node  {}", rpc.getQName());
+          }
           CompositeNode inputContainer = cNode.getFirstCompositeByName(QName.create(cNode.getNodeType(), "input"));
           domTree = XmlDocumentUtils.toDocument(inputContainer, rpc.getInput(), XmlDocumentUtils.defaultValueCodecProvider());
-
-          LOG.debug("input composite node to document conversion complete, document is   {}", domTree);
+          if(LOG.isDebugEnabled()) {
+              LOG.debug("input composite node to document conversion complete, document is   {}", domTree);
+          }
           break;
         }
       }
@@ -111,7 +115,9 @@ public class XmlUtils {
    * @return xml string
    */
   public static String outputCompositeNodeToXml(CompositeNode cNode, SchemaContext schemaContext){
-    LOG.debug("Converting output composite node to xml {}", cNode);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Converting output composite node to xml {}", cNode);
+    }
     if (cNode == null) {
         return BLANK;
     }
@@ -125,12 +131,14 @@ public class XmlUtils {
       Set<RpcDefinition> rpcs =  schemaContext.getOperations();
       for(RpcDefinition rpc : rpcs) {
         if(rpc.getQName().equals(cNode.getNodeType())){
-          LOG.debug("Found the rpc definition from schema context matching with output composite node  {}", rpc.getQName());
-
+          if(LOG.isDebugEnabled()) {
+              LOG.debug("Found the rpc definition from schema context matching with output composite node  {}", rpc.getQName());
+          }
           CompositeNode outputContainer = cNode.getFirstCompositeByName(QName.create(cNode.getNodeType(), "output"));
           domTree = XmlDocumentUtils.toDocument(outputContainer, rpc.getOutput(), XmlDocumentUtils.defaultValueCodecProvider());
-
-          LOG.debug("output composite node to document conversion complete, document is   {}", domTree);
+          if(LOG.isDebugEnabled()) {
+              LOG.debug("output composite node to document conversion complete, document is   {}", domTree);
+          }
           break;
         }
       }
@@ -152,8 +160,9 @@ public class XmlUtils {
 
       LOG.error("Error during translation of Document to OutputStream", e);
     }
-    LOG.debug("Document to string conversion complete, xml string is  {} ",  writer.toString());
-
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Document to string conversion complete, xml string is  {} ", writer.toString());
+    }
     return writer.toString();
   }
 
@@ -188,7 +197,9 @@ public class XmlUtils {
    * @return CompositeNode object based on the input, if any of the input parameter is null, a null object is returned
    */
   public static CompositeNode inputXmlToCompositeNode(QName rpc, String xml,  SchemaContext schemaContext){
-    LOG.debug("Converting input xml to composite node {}", xml);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Converting input xml to composite node {}", xml);
+    }
     if (xml==null || xml.length()==0) {
         return null;
     }
@@ -208,8 +219,9 @@ public class XmlUtils {
       Set<RpcDefinition> rpcs =  schemaContext.getOperations();
       for(RpcDefinition rpcDef : rpcs) {
         if(rpcDef.getQName().equals(rpc)){
-          LOG.debug("found the rpc definition from schema context matching rpc  {}", rpc);
-
+          if(LOG.isDebugEnabled()) {
+              LOG.debug("found the rpc definition from schema context matching rpc  {}", rpc);
+          }
           if(rpcDef.getInput() == null) {
             LOG.warn("found rpc definition's input is null");
             return null;
@@ -225,9 +237,9 @@ public class XmlUtils {
 
           List<Node<?>> dataNodes = XmlDocumentUtils.toDomNodes(xmlData,
               Optional.of(rpcDef.getInput().getChildNodes()), schemaContext);
-
-          LOG.debug("Converted xml input to list of nodes  {}", dataNodes);
-
+          if(LOG.isDebugEnabled()) {
+              LOG.debug("Converted xml input to list of nodes  {}", dataNodes);
+          }
           final CompositeNodeBuilder<ImmutableCompositeNode> it = ImmutableCompositeNode.builder();
           it.setQName(rpc);
           it.add(ImmutableCompositeNode.create(input, dataNodes));
@@ -240,8 +252,9 @@ public class XmlUtils {
     } catch (IOException e) {
       LOG.error("Error during building data tree from XML", e);
     }
-
-    LOG.debug("Xml to composite node conversion complete {} ", compositeNode);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Xml to composite node conversion complete {} ", compositeNode);
+    }
     return compositeNode;
   }
 
similarity index 82%
rename from opendaylight/md-sal/sal-akka-raft/src/main/resources/InstallSnapshot.proto
rename to opendaylight/md-sal/sal-clustering-commons/src/main/resources/InstallSnapshot.proto
index 14f821b5e20f1fd44f1f436cf76ce08e39e1ccf0..4198644b13952f8e05c5f9f4a0db75316594c7ba 100644 (file)
@@ -1,6 +1,6 @@
 package org.opendaylight.controller.cluster.raft;
 
-option java_package = "org.opendaylight.controller.cluster.raft.protobuff.messages";
+option java_package = "org.opendaylight.controller.protobuff.messages.cluster.raft";
 option java_outer_classname = "InstallSnapshotMessages";
 option optimize_for = SPEED;
 
index 63b75ac430f9bbbaca2ad8d1ecb7c63f0cff37d2..26581478d97a1449ff88a07383e9a88c554aa9a5 100644 (file)
@@ -15,6 +15,7 @@ message CloseTransactionReply{
 message CreateTransaction{
   required string transactionId = 1;
   required int32  transactionType =2;
+  optional string transactionChainId = 3;
 }
 
 message CreateTransactionReply{
index 42f87cbda6dbd661b395efccd5ae9e729420759f..5dc67aa98fda691d0d014599104181508d94af08 100644 (file)
@@ -4,20 +4,10 @@ option java_package = "org.opendaylight.controller.protobuff.messages.transactio
 option java_outer_classname = "ShardTransactionChainMessages";
 
 message CloseTransactionChain {
-
+    optional string transactionChainId = 1;
 }
 
 message CloseTransactionChainReply{
 
-
-}
-
-message CreateTransactionChain {
-
-}
-
-message CreateTransactionChainReply{
-required string transactionChainPath = 1;
-
 }
 
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/common/actor/CommonConfigTest.java b/opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/common/actor/CommonConfigTest.java
new file mode 100644 (file)
index 0000000..cd77ab2
--- /dev/null
@@ -0,0 +1,43 @@
+package org.opendaylight.controller.cluster.common.actor;
+
+import org.junit.Test;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class CommonConfigTest {
+
+    @Test
+    public void testCommonConfigDefaults(){
+        CommonConfig config = new CommonConfig.Builder<>("testsystem").build();
+
+        assertNotNull(config.getActorSystemName());
+        assertNotNull(config.getMailBoxCapacity());
+        assertNotNull(config.getMailBoxName());
+        assertNotNull(config.getMailBoxPushTimeout());
+        assertNotNull(config.isMetricCaptureEnabled());
+    }
+
+    @Test
+    public void testCommonConfigOverride(){
+
+        int expectedCapacity = 123;
+        String timeoutValue = "1000ms";
+        CommonConfig config = new CommonConfig.Builder<>("testsystem")
+                .mailboxCapacity(expectedCapacity)
+                .mailboxPushTimeout(timeoutValue)
+                .metricCaptureEnabled(true)
+                .build();
+
+        assertEquals(expectedCapacity, config.getMailBoxCapacity().intValue());
+
+        FiniteDuration expectedTimeout = FiniteDuration.create(1000, TimeUnit.MILLISECONDS);
+        assertEquals(expectedTimeout.toMillis(), config.getMailBoxPushTimeout().toMillis());
+
+        assertTrue(config.isMetricCaptureEnabled());
+    }
+}
\ No newline at end of file
@@ -5,7 +5,7 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.common.actor;
+package org.opendaylight.controller.cluster.common.actor;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
@@ -25,11 +25,13 @@ import java.util.concurrent.locks.ReentrantLock;
 public class MeteredBoundedMailboxTest {
 
     private static ActorSystem actorSystem;
+    private static CommonConfig config;
     private final ReentrantLock lock = new ReentrantLock();
 
     @Before
     public void setUp() throws Exception {
-        actorSystem = ActorSystem.create("testsystem");
+        config = new CommonConfig.Builder<>("testsystem").build();
+        actorSystem = ActorSystem.create("testsystem", config.get());
     }
 
     @After
@@ -39,15 +41,14 @@ public class MeteredBoundedMailboxTest {
     }
 
     @Test
-    public void test_WhenQueueIsFull_ShouldSendMsgToDeadLetter() throws InterruptedException {
+    public void shouldSendMsgToDeadLetterWhenQueueIsFull() throws InterruptedException {
         final JavaTestKit mockReceiver = new JavaTestKit(actorSystem);
         actorSystem.eventStream().subscribe(mockReceiver.getRef(), DeadLetter.class);
 
 
         final FiniteDuration TWENTY_SEC = new FiniteDuration(20, TimeUnit.SECONDS);
 
-        String boundedMailBox = actorSystem.name() + ".bounded-mailbox";
-        ActorRef pingPongActor = actorSystem.actorOf(PingPongActor.props(lock).withMailbox(boundedMailBox),
+        ActorRef pingPongActor = actorSystem.actorOf(PingPongActor.props(lock).withMailbox(config.getMailBoxName()),
                                                      "pingpongactor");
 
         actorSystem.mailboxes().settings();
index 0392dec3dd43075c2df343526bafa5cbd59e4ada..a8a6513bb6e5dd725ea20f22d17f8b369b7a97b0 100644 (file)
@@ -1,7 +1,7 @@
 testsystem {
 
   bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 10
     mailbox-push-timeout-time = 100ms
   }
index 3481bae8ae05851cbc6509d41cdc65bf7a561715..43696c765202e9fdfc71f695e232ee25fffaf442 100644 (file)
@@ -1,8 +1,5 @@
-testsystem {
-
   bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 1000
     mailbox-push-timeout-time = 10ms
   }
-}
\ No newline at end of file
index 72da6304e54f2c19499189fa13ca0e74e30172db..fbb666a9caab32c90646796078132d1ed13c8bfa 100644 (file)
@@ -56,6 +56,9 @@
                         <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
                         <name>dom-broker</name>
                     </dom-broker>
+                    <enable-metric-capture xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:remote-rpc-connector">true</enable-metric-capture>
+                    <actor-system-name xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:remote-rpc-connector">odl-cluster-rpc</actor-system-name>
+                    <bounded-mailbox-capacity xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:remote-rpc-connector">1000</bounded-mailbox-capacity>
                 </module>
 
             </modules>
index 5a2116b50f92070687736329e95441a246ed49b0..f196ad1644791b92e5ec32a43ebeb36cb7411734 100644 (file)
@@ -1,12 +1,19 @@
 
 odl-cluster-data {
   bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 1000
     mailbox-push-timeout-time = 100ms
-  }    
+  }
+
+  metric-capture-enabled = true
+
   akka {
+    loglevel = "INFO"
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
+
     actor {
+
       provider = "akka.cluster.ClusterActorRefProvider"
       serializers {
                 java = "akka.serialization.JavaSerializer"
@@ -44,11 +51,17 @@ odl-cluster-data {
 
 odl-cluster-rpc {
   bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 1000
     mailbox-push-timeout-time = 100ms
   }
+
+  metric-capture-enabled = true
+
   akka {
+    loglevel = "INFO"
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
+
     actor {
       provider = "akka.cluster.ClusterActorRefProvider"
 
@@ -62,7 +75,7 @@ odl-cluster-rpc {
     }
 
     cluster {
-      seed-nodes = ["akka.tcp://opendaylight-cluster-rpc@127.0.0.1:2551"]
+      seed-nodes = ["akka.tcp://odl-cluster-rpc@127.0.0.1:2551"]
 
       auto-down-unreachable-after = 10s
     }
index 60313bf109ba30dd6692365181456337f9a91402..50952eaaf1ca90c89e02a5636e5d3594f0809252 100644 (file)
@@ -8,11 +8,10 @@
 
 package org.opendaylight.controller.md.sal.common.api.data;
 
+import com.google.common.base.Supplier;
 import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
-
-import com.google.common.base.Function;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 
 /**
  * A type of TransactionCommitFailedException that indicates a situation that would result in a
@@ -24,23 +23,21 @@ import com.google.common.base.Function;
  * @author Thomas Pantelis
  */
 public class TransactionCommitDeadlockException extends TransactionCommitFailedException {
-
     private static final long serialVersionUID = 1L;
-
     private static final String DEADLOCK_MESSAGE =
             "An attempt to block on a ListenableFuture via a get method from a write " +
             "transaction submit was detected that would result in deadlock. The commit " +
             "result must be obtained asynchronously, e.g. via Futures#addCallback, to avoid deadlock.";
+    private static final RpcError DEADLOCK_RPCERROR = RpcResultBuilder.newError(ErrorType.APPLICATION, "lock-denied", DEADLOCK_MESSAGE);
 
-    public static Function<Void, Exception> DEADLOCK_EXECUTOR_FUNCTION = new Function<Void, Exception>() {
+    public static final Supplier<Exception> DEADLOCK_EXCEPTION_SUPPLIER = new Supplier<Exception>() {
         @Override
-        public Exception apply(Void notUsed) {
-            return new TransactionCommitDeadlockException( DEADLOCK_MESSAGE,
-                    RpcResultBuilder.newError(ErrorType.APPLICATION, "lock-denied", DEADLOCK_MESSAGE));
+        public Exception get() {
+            return new TransactionCommitDeadlockException(DEADLOCK_MESSAGE, DEADLOCK_RPCERROR);
         }
     };
 
-    public TransactionCommitDeadlockException(String message, final RpcError... errors) {
+    public TransactionCommitDeadlockException(final String message, final RpcError... errors) {
         super(message, errors);
     }
 }
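With the Function<Void, Exception> replaced by a Supplier, callers no longer pass a throwaway null argument. A hedged sketch of how the supplier might be consumed; the surrounding deadlock-detection wiring is assumed and is not shown in this change:

    // Lazily obtain the canned deadlock exception only when a deadlock is actually detected.
    Exception deadlock = TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER.get();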
index d544c4b3710b06a12dac1bba3b11b5e13ced4f89..b2a03c298772caba9509e3122598dd8f1bc06aee 100644 (file)
@@ -7,9 +7,13 @@
  */
 package org.opendaylight.controller.md.sal.common.impl.service;
 
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.AsyncFunction;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
-
 import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
 import org.opendaylight.controller.md.sal.common.impl.AbstractDataModification;
@@ -19,15 +23,11 @@ import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.AsyncFunction;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-
 public abstract class AbstractDataTransaction<P extends Path<P>, D extends Object> extends
         AbstractDataModification<P, D> {
-    private final static Logger LOG = LoggerFactory.getLogger(AbstractDataTransaction.class);
+    private static final Logger LOG = LoggerFactory.getLogger(AbstractDataTransaction.class);
+    private static final ListenableFuture<RpcResult<TransactionStatus>> SUCCESS_FUTURE =
+            Futures.immediateFuture(RpcResultBuilder.success(TransactionStatus.COMMITED).build());
 
     private final Object identifier;
     private final long allocationTime;
@@ -55,9 +55,10 @@ public abstract class AbstractDataTransaction<P extends Path<P>, D extends Objec
     @Override
     public Future<RpcResult<TransactionStatus>> commit() {
         readyTime = System.nanoTime();
-        LOG.debug("Transaction {} Ready after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(readyTime - allocationTime));
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Transaction {} Ready after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(readyTime - allocationTime));
+        }
         changeStatus(TransactionStatus.SUBMITED);
-
         return this.broker.commit(this);
     }
 
@@ -88,7 +89,7 @@ public abstract class AbstractDataTransaction<P extends Path<P>, D extends Objec
     }
 
     @Override
-    public boolean equals(Object obj) {
+    public boolean equals(final Object obj) {
         if (this == obj) {
             return true;
         }
@@ -118,13 +119,18 @@ public abstract class AbstractDataTransaction<P extends Path<P>, D extends Objec
 
     public void succeeded() {
         this.completeTime = System.nanoTime();
-        LOG.debug("Transaction {} Committed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Transaction {} Committed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+        }
         changeStatus(TransactionStatus.COMMITED);
     }
 
     public void failed() {
         this.completeTime = System.nanoTime();
-        LOG.debug("Transaction {} Failed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Transaction {} Failed after {}ms.", identifier, TimeUnit.NANOSECONDS.toMillis(completeTime - readyTime));
+        }
         changeStatus(TransactionStatus.FAILED);
     }
 
@@ -134,14 +140,12 @@ public abstract class AbstractDataTransaction<P extends Path<P>, D extends Objec
         this.onStatusChange(status);
     }
 
-    public static ListenableFuture<RpcResult<TransactionStatus>> convertToLegacyCommitFuture(
-                                        CheckedFuture<Void,TransactionCommitFailedException> from ) {
+    public static ListenableFuture<RpcResult<TransactionStatus>> convertToLegacyCommitFuture(final CheckedFuture<Void,TransactionCommitFailedException> from) {
         return Futures.transform(from, new AsyncFunction<Void, RpcResult<TransactionStatus>>() {
             @Override
-            public ListenableFuture<RpcResult<TransactionStatus>> apply(Void input) throws Exception {
-                return Futures.immediateFuture(RpcResultBuilder.<TransactionStatus>
-                                                              success(TransactionStatus.COMMITED).build());
+            public ListenableFuture<RpcResult<TransactionStatus>> apply(final Void input) {
+                return SUCCESS_FUTURE;
             }
-        } );
+        });
     }
 }
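A hedged usage sketch for the tightened helper above; commitFuture is an assumed CheckedFuture<Void, TransactionCommitFailedException> obtained from a newer-style commit API, and the imports match those already present in this class:

    // Bridge the new-style commit future back to the legacy RpcResult<TransactionStatus> shape.
    ListenableFuture<RpcResult<TransactionStatus>> legacy =
        AbstractDataTransaction.convertToLegacyCommitFuture(commitFuture);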
index b67855d7312c697b2a7a0c1049a3b628701f4e08..3de49ae296f391bfa9b33afb3524a3ed4e0cc3ae 100644 (file)
@@ -16,6 +16,8 @@ import java.util.concurrent.ThreadPoolExecutor;
 import javax.annotation.Nullable;
 import org.opendaylight.yangtools.util.concurrent.CountingRejectedExecutionHandler;
 import org.opendaylight.yangtools.util.concurrent.TrackingLinkedBlockingQueue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * MXBean implementation of the ThreadExecutorStatsMXBean interface that retrieves statistics
@@ -25,7 +27,7 @@ import org.opendaylight.yangtools.util.concurrent.TrackingLinkedBlockingQueue;
  */
 public class ThreadExecutorStatsMXBeanImpl extends AbstractMXBean
                                            implements ThreadExecutorStatsMXBean {
-
+    private static final Logger LOG = LoggerFactory.getLogger(ThreadExecutorStatsMXBeanImpl.class);
     private final ThreadPoolExecutor executor;
 
     /**
@@ -36,14 +38,53 @@ public class ThreadExecutorStatsMXBeanImpl extends AbstractMXBean
      * @param mBeanType Used as the <code>type</code> property in the bean's ObjectName.
      * @param mBeanCategory Used as the <code>Category</code> property in the bean's ObjectName.
      */
-    public ThreadExecutorStatsMXBeanImpl(Executor executor, String mBeanName,
-            String mBeanType, @Nullable String mBeanCategory) {
+    public ThreadExecutorStatsMXBeanImpl(final ThreadPoolExecutor executor, final String mBeanName,
+            final String mBeanType, @Nullable final String mBeanCategory) {
         super(mBeanName, mBeanType, mBeanCategory);
+        this.executor = Preconditions.checkNotNull(executor);
+    }
+
+    private static ThreadExecutorStatsMXBeanImpl createInternal(final Executor executor,
+            final String mBeanName, final String mBeanType, final String mBeanCategory) {
+        if (executor instanceof ThreadPoolExecutor) {
+            final ThreadExecutorStatsMXBeanImpl ret = new ThreadExecutorStatsMXBeanImpl(
+                    (ThreadPoolExecutor) executor, mBeanName, mBeanType, mBeanCategory);
+            return ret;
+        }
+
+        LOG.info("Executor {} is not supported", executor);
+        return null;
+    }
+
+    /**
+     * Creates a new bean if the backing executor is a ThreadPoolExecutor and registers it.
+     *
+     * @param executor the backing {@link Executor}
+     * @param mBeanName Used as the <code>name</code> property in the bean's ObjectName.
+     * @param mBeanType Used as the <code>type</code> property in the bean's ObjectName.
+     * @param mBeanCategory Used as the <code>Category</code> property in the bean's ObjectName.
+     * @return a registered ThreadExecutorStatsMXBeanImpl instance if the backing executor
+     *         is a ThreadPoolExecutor, otherwise null.
+     */
+    public static ThreadExecutorStatsMXBeanImpl create(final Executor executor, final String mBeanName,
+            final String mBeanType, @Nullable final String mBeanCategory) {
+        ThreadExecutorStatsMXBeanImpl ret = createInternal(executor, mBeanName, mBeanType, mBeanCategory);
+        if(ret != null) {
+            ret.registerMBean();
+        }
 
-        Preconditions.checkArgument(executor instanceof ThreadPoolExecutor,
-                "The ExecutorService of type {} is not an instanceof ThreadPoolExecutor",
-                executor.getClass());
-        this.executor = (ThreadPoolExecutor)executor;
+        return ret;
+    }
+
+    /**
+     * Creates a new bean if the backing executor is a ThreadPoolExecutor.
+     *
+     * @param executor the backing {@link Executor}
+     * @return a ThreadExecutorStatsMXBeanImpl instance if the backing executor
+     *         is a ThreadPoolExecutor, otherwise null.
+     */
+    public static ThreadExecutorStatsMXBeanImpl create(final Executor executor) {
+        return createInternal(executor, "", "", null);
     }
 
     @Override
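A hedged sketch of the new factory in use: instead of the old constructor rejecting a non-ThreadPoolExecutor, create(...) returns null, so callers guard on the result. The executor and bean names below are illustrative.

    java.util.concurrent.ExecutorService executor =
        java.util.concurrent.Executors.newFixedThreadPool(4);

    // Registers and returns a stats bean because newFixedThreadPool() is backed by a
    // ThreadPoolExecutor; for other executor types create(...) would return null.
    ThreadExecutorStatsMXBeanImpl statsBean = ThreadExecutorStatsMXBeanImpl.create(
            executor, "commit-executor", "DistributedDatastore", null);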
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ActorSystemFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ActorSystemFactory.java
deleted file mode 100644 (file)
index b326d61..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorSystem;
-import akka.actor.Props;
-import akka.osgi.BundleDelegatingClassLoader;
-import com.google.common.base.Preconditions;
-import com.typesafe.config.Config;
-import com.typesafe.config.ConfigFactory;
-import org.osgi.framework.BundleContext;
-
-import java.io.File;
-
-public class ActorSystemFactory {
-
-    public static final String AKKA_CONF_PATH = "./configuration/initial/akka.conf";
-    public static final String ACTOR_SYSTEM_NAME = "opendaylight-cluster-data";
-    public static final String CONFIGURATION_NAME = "odl-cluster-data";
-
-    private static volatile ActorSystem actorSystem = null;
-
-    public static final ActorSystem getInstance(){
-        return actorSystem;
-    }
-
-    /**
-     * This method should be called only once during initialization
-     *
-     * @param bundleContext
-     */
-    public static final ActorSystem createInstance(final BundleContext bundleContext) {
-        if(actorSystem == null) {
-            // Create an OSGi bundle classloader for actor system
-            BundleDelegatingClassLoader classLoader = new BundleDelegatingClassLoader(bundleContext.getBundle(),
-                Thread.currentThread().getContextClassLoader());
-            synchronized (ActorSystemFactory.class) {
-                // Double check
-
-                if (actorSystem == null) {
-                    ActorSystem system = ActorSystem.create(ACTOR_SYSTEM_NAME,
-                        ConfigFactory.load(readAkkaConfiguration()).getConfig(CONFIGURATION_NAME), classLoader);
-                    system.actorOf(Props.create(TerminationMonitor.class), "termination-monitor");
-                    actorSystem = system;
-                }
-            }
-        }
-
-        return actorSystem;
-    }
-
-
-    private static final Config readAkkaConfiguration(){
-        File defaultConfigFile = new File(AKKA_CONF_PATH);
-        Preconditions.checkState(defaultConfigFile.exists(), "akka.conf is missing");
-        return ConfigFactory.parseFile(defaultConfigFile);
-    }
-}
index 34239070a3dfafe15c8ba7e71c630eaf56136e3b..b2646060539332fc45d58776aaf0a7efe2b0d3ff 100644 (file)
@@ -13,6 +13,7 @@ import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategy
 
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 public interface Configuration {
 
@@ -52,4 +53,10 @@ public interface Configuration {
      * @return
      */
     List<String> getMembersFromShardName(String shardName);
+
+    /**
+     *
+     * @return
+     */
+    Set<String> getAllShardNames();
 }
index 37b565d2131debe797ca2ab975baf82550740a68..1a0a5dd6591c9a653ae2079b938d6448e59c036f 100644 (file)
@@ -23,8 +23,10 @@ import java.io.File;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 public class ConfigurationImpl implements Configuration {
 
@@ -161,6 +163,16 @@ public class ConfigurationImpl implements Configuration {
         return Collections.EMPTY_LIST;
     }
 
+    @Override public Set<String> getAllShardNames() {
+        Set<String> shardNames = new LinkedHashSet<>();
+        for(ModuleShard ms : moduleShards){
+            for(Shard s : ms.getShards()) {
+                shardNames.add(s.getName());
+            }
+        }
+        return shardNames;
+    }
+
 
 
     private void readModules(Config modulesConfig) {
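A hedged caller-side sketch of the new accessor; configuration is an assumed Configuration instance:

    // Walk every configured shard exactly once, e.g. when bootstrapping local shard actors.
    for (String shardName : configuration.getAllShardNames()) {
        // per-shard setup would go here
    }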
index b3283a18b1baaf4c3c7530570b8ae8a09512211f..f1c0df4c3ad2a336a6aa8edc7282aa399f160c13 100644 (file)
@@ -12,6 +12,7 @@ import akka.actor.Props;
 import akka.japi.Creator;
 
 import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 
 import org.opendaylight.controller.cluster.datastore.messages.DataChanged;
 import org.opendaylight.controller.cluster.datastore.messages.DataChangedReply;
index 818f73392d1d880ad0dde5d84704c1e4c9d15c17..342611298c78b3ea5709473afaf8b2e2a19d4e9d 100644 (file)
@@ -11,6 +11,7 @@ package org.opendaylight.controller.cluster.datastore;
 import akka.actor.PoisonPill;
 import akka.actor.Props;
 import akka.japi.Creator;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistration;
 import org.opendaylight.controller.cluster.datastore.messages.CloseDataChangeListenerRegistrationReply;
index e3cdbb4ee131d1b0961e9d57c8eeb5ee6e568b61..acf630e2e95598e71fdbd786da628f3524a29408 100644 (file)
@@ -25,9 +25,10 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
  * </p>
  */
 public class DataChangeListenerRegistrationProxy implements ListenerRegistration {
-    private final ActorSelection listenerRegistrationActor;
+    private volatile ActorSelection listenerRegistrationActor;
     private final AsyncDataChangeListener listener;
     private final ActorRef dataChangeListenerActor;
+    private boolean closed = false;
 
     public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
     DataChangeListenerRegistrationProxy(
@@ -38,14 +39,51 @@ public class DataChangeListenerRegistrationProxy implements ListenerRegistration
         this.dataChangeListenerActor = dataChangeListenerActor;
     }
 
+    public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
+    DataChangeListenerRegistrationProxy(
+        L listener, ActorRef dataChangeListenerActor) {
+        this(null, listener, dataChangeListenerActor);
+    }
+
     @Override
     public Object getInstance() {
         return listener;
     }
 
+    public void setListenerRegistrationActor(ActorSelection listenerRegistrationActor) {
+        boolean sendCloseMessage = false;
+        synchronized(this) {
+            if(closed) {
+                sendCloseMessage = true;
+            } else {
+                this.listenerRegistrationActor = listenerRegistrationActor;
+            }
+        }
+        if(sendCloseMessage) {
+            listenerRegistrationActor.tell(new
+                CloseDataChangeListenerRegistration().toSerializable(), null);
+        }
+
+        this.listenerRegistrationActor = listenerRegistrationActor;
+    }
+
+    public ActorSelection getListenerRegistrationActor() {
+        return listenerRegistrationActor;
+    }
+
     @Override
     public void close() {
-        listenerRegistrationActor.tell(new CloseDataChangeListenerRegistration().toSerializable(), null);
+
+        boolean sendCloseMessage;
+        synchronized(this) {
+            sendCloseMessage = !closed && listenerRegistrationActor != null;
+            closed = true;
+        }
+        if(sendCloseMessage) {
+            listenerRegistrationActor.tell(new
+                CloseDataChangeListenerRegistration().toSerializable(), null);
+        }
+
         dataChangeListenerActor.tell(PoisonPill.getInstance(), null);
     }
 }
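
The setListenerRegistrationActor/close pair above is a small state machine that tolerates the registration reply arriving after the client has already closed the registration: whichever side runs second is responsible for sending the close message. A minimal standalone sketch of the same handshake, assuming a plain Runnable in place of the CloseDataChangeListenerRegistration message and a hypothetical AsyncRegistrationHandle class, could look like this:

    // Sketch only: the closed-flag handshake used above, with a Runnable
    // standing in for the close message sent to the registration actor.
    public class AsyncRegistrationHandle {
        private volatile Runnable closeAction;   // set once async registration completes
        private boolean closed = false;          // guarded by 'this'

        // Called from the future callback when registration succeeds.
        public void setCloseAction(Runnable closeAction) {
            boolean runCloseNow = false;
            synchronized (this) {
                if (closed) {
                    runCloseNow = true;          // close() already happened; undo the registration
                } else {
                    this.closeAction = closeAction;
                }
            }
            if (runCloseNow) {
                closeAction.run();
            }
        }

        // Called by the client; safe whether or not registration has completed yet.
        public void close() {
            boolean runCloseNow;
            synchronized (this) {
                runCloseNow = !closed && closeAction != null;
                closed = true;
            }
            if (runCloseNow) {
                closeAction.run();
            }
        }
    }

The close message is deliberately sent outside the synchronized block so that no actor messaging happens while holding the lock.
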
index db01d515354a9d166e2b906d8cd7168e7c39deb0..c780881a2ffad1ed50695b7a38111068ec2f8e3f 100644 (file)
@@ -10,9 +10,9 @@ package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
-
+import akka.dispatch.OnComplete;
+import akka.util.Timeout;
 import com.google.common.base.Preconditions;
-
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
@@ -32,6 +32,7 @@ import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
 
 /**
  *
@@ -39,6 +40,7 @@ import org.slf4j.LoggerFactory;
 public class DistributedDataStore implements DOMStore, SchemaContextListener, AutoCloseable {
 
     private static final Logger LOG = LoggerFactory.getLogger(DistributedDataStore.class);
+    public static final int REGISTER_DATA_CHANGE_LISTENER_TIMEOUT_FACTOR = 24; // 24 times the usual operation timeout
 
     private final ActorContext actorContext;
 
@@ -69,33 +71,48 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener, Au
     @Override
     public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
                                               ListenerRegistration<L> registerChangeListener(
-        YangInstanceIdentifier path, L listener,
+        final YangInstanceIdentifier path, L listener,
         AsyncDataBroker.DataChangeScope scope) {
 
         Preconditions.checkNotNull(path, "path should not be null");
         Preconditions.checkNotNull(listener, "listener should not be null");
-
-        LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
+        }
         ActorRef dataChangeListenerActor = actorContext.getActorSystem().actorOf(
             DataChangeListener.props(listener ));
 
         String shardName = ShardStrategyFactory.getStrategy(path).findShard(path);
 
-        Object result = actorContext.executeLocalShardOperation(shardName,
-            new RegisterChangeListener(path, dataChangeListenerActor.path(), scope));
-
-        if (result != null) {
-            RegisterChangeListenerReply reply = (RegisterChangeListenerReply) result;
-            return new DataChangeListenerRegistrationProxy(actorContext
-                .actorSelection(reply.getListenerRegistrationPath()), listener,
-                dataChangeListenerActor);
+        Future future = actorContext.executeLocalShardOperationAsync(shardName,
+            new RegisterChangeListener(path, dataChangeListenerActor.path(), scope),
+            new Timeout(actorContext.getOperationDuration().$times(
+                REGISTER_DATA_CHANGE_LISTENER_TIMEOUT_FACTOR)));
+
+        if (future != null) {
+            final DataChangeListenerRegistrationProxy listenerRegistrationProxy =
+                new DataChangeListenerRegistrationProxy(listener, dataChangeListenerActor);
+
+            future.onComplete(new OnComplete(){
+
+                @Override public void onComplete(Throwable failure, Object result)
+                    throws Throwable {
+                    if(failure != null){
+                        LOG.error("Failed to register listener at path " + path.toString(), failure);
+                        return;
+                    }
+                    RegisterChangeListenerReply reply = (RegisterChangeListenerReply) result;
+                    listenerRegistrationProxy.setListenerRegistrationActor(actorContext
+                        .actorSelection(reply.getListenerRegistrationPath()));
+                }
+            }, actorContext.getActorSystem().dispatcher());
+            return listenerRegistrationProxy;
+        }
+        if(LOG.isDebugEnabled()) {
+            LOG.debug(
+                "No local shard for shardName {} was found so returning a noop registration",
+                shardName);
         }
-
-        LOG.debug(
-            "No local shard for shardName {} was found so returning a noop registration",
-            shardName);
-
         return new NoOpDataChangeListenerRegistration(listener);
     }
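
registerChangeListener above no longer blocks on the shard: it returns the proxy immediately and completes it from an OnComplete callback, falling back to a no-op registration when no local shard exists. A simplified sketch of that flow, assuming java.util.concurrent.CompletableFuture in place of the Akka/Scala Future and reusing the hypothetical AsyncRegistrationHandle sketch above, might be:

    // Sketch only: asynchronous listener registration that wires the late reply
    // into an already-returned handle.
    import java.util.concurrent.CompletableFuture;

    public class ListenerRegistrationFlow {

        // registrationReply is null when there is no local shard (mirrors the null Future above).
        public AsyncRegistrationHandle register(CompletableFuture<String> registrationReply) {
            AsyncRegistrationHandle handle = new AsyncRegistrationHandle();
            if (registrationReply == null) {
                // No local shard: the caller still gets a handle, but closing it is a no-op.
                return handle;
            }
            registrationReply.whenComplete((registrationPath, failure) -> {
                if (failure != null) {
                    System.err.println("Failed to register listener: " + failure);
                    return;
                }
                // Late-arriving registration: the handle's closed-flag handshake decides
                // whether to keep it or to undo it immediately.
                handle.setCloseAction(() ->
                    System.out.println("closing registration at " + registrationPath));
            });
            return handle;
        }
    }
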
 
index 8739ed1966b618a3843c8e23e5671ea05eb48f15..a6f187d065504b21f2f4cf5a7b0d27007880d831 100644 (file)
@@ -9,22 +9,60 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorSystem;
-
+import akka.actor.Props;
+import akka.osgi.BundleDelegatingClassLoader;
+import com.google.common.base.Preconditions;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
 import org.opendaylight.controller.sal.core.api.model.SchemaService;
 import org.osgi.framework.BundleContext;
 
+import java.io.File;
+import java.util.concurrent.atomic.AtomicReference;
+
 public class DistributedDataStoreFactory {
+
+    public static final String AKKA_CONF_PATH = "./configuration/initial/akka.conf";
+    public static final String ACTOR_SYSTEM_NAME = "opendaylight-cluster-data";
+    public static final String CONFIGURATION_NAME = "odl-cluster-data";
+    private static final AtomicReference<ActorSystem> actorSystem = new AtomicReference<>();
+
     public static DistributedDataStore createInstance(String name, SchemaService schemaService,
-            DatastoreContext datastoreContext, BundleContext bundleContext) {
+                                                      DatastoreContext datastoreContext, BundleContext bundleContext) {
 
-        ActorSystem actorSystem = ActorSystemFactory.createInstance(bundleContext);
+        ActorSystem actorSystem = getOrCreateInstance(bundleContext);
         Configuration config = new ConfigurationImpl("module-shards.conf", "modules.conf");
         final DistributedDataStore dataStore =
-            new DistributedDataStore(actorSystem, name, new ClusterWrapperImpl(actorSystem),
-                    config, datastoreContext );
+                new DistributedDataStore(actorSystem, name, new ClusterWrapperImpl(actorSystem),
+                        config, datastoreContext);
+
         ShardStrategyFactory.setConfiguration(config);
         schemaService.registerSchemaContextListener(dataStore);
         return dataStore;
     }
+
+    private static synchronized ActorSystem getOrCreateInstance(final BundleContext bundleContext) {
+
+        if (actorSystem.get() != null){
+            return actorSystem.get();
+        }
+        // Create an OSGi bundle classloader for actor system
+        BundleDelegatingClassLoader classLoader = new BundleDelegatingClassLoader(bundleContext.getBundle(),
+                Thread.currentThread().getContextClassLoader());
+
+        ActorSystem system = ActorSystem.create(ACTOR_SYSTEM_NAME,
+                ConfigFactory.load(readAkkaConfiguration()).getConfig(CONFIGURATION_NAME), classLoader);
+        system.actorOf(Props.create(TerminationMonitor.class), "termination-monitor");
+
+        actorSystem.set(system);
+        return system;
+    }
+
+
+    private static final Config readAkkaConfiguration() {
+        File defaultConfigFile = new File(AKKA_CONF_PATH);
+        Preconditions.checkState(defaultConfigFile.exists(), "akka.conf is missing");
+        return ConfigFactory.parseFile(defaultConfigFile);
+    }
 }
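
DistributedDataStoreFactory above now lazily creates one shared ActorSystem per process, guarded by an AtomicReference plus a synchronized factory method. The same shape, reduced to a generic resource holder (a sketch under those assumptions, not the factory's API), is:

    // Sketch only: lazy, process-wide singleton creation as used above.
    import java.util.concurrent.atomic.AtomicReference;
    import java.util.function.Supplier;

    public final class SharedResource<T> {

        private final AtomicReference<T> instance = new AtomicReference<>();
        private final Supplier<T> factory;

        public SharedResource(Supplier<T> factory) {
            this.factory = factory;
        }

        // synchronized so concurrent callers cannot build two instances;
        // the AtomicReference keeps the published value visible to later readers.
        public synchronized T getOrCreate() {
            T existing = instance.get();
            if (existing != null) {
                return existing;
            }
            T created = factory.get();   // e.g. ActorSystem.create(...) in the code above
            instance.set(created);
            return created;
        }
    }
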
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreProperties.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreProperties.java
deleted file mode 100644 (file)
index e69de29..0000000
index 7d570046d406feec976620f9215398475711a756..0fa27706e19382c0b84cc44b93d890ee9f0d1c8e 100644 (file)
@@ -26,14 +26,15 @@ import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.InvalidProtocolBufferException;
+import org.opendaylight.controller.cluster.common.actor.CommonConfig;
+import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
+import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChain;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChainReply;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
 import org.opendaylight.controller.cluster.datastore.messages.ForwardedCommitTransaction;
@@ -51,6 +52,7 @@ import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
 import org.opendaylight.controller.cluster.raft.RaftActor;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
 import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
@@ -59,6 +61,7 @@ import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessa
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -107,11 +110,12 @@ public class Shard extends RaftActor {
 
     private final DatastoreContext datastoreContext;
 
-
     private SchemaContext schemaContext;
 
     private ActorRef createSnapshotTransaction;
 
+    private final Map<String, DOMStoreTransactionChain> transactionChains = new HashMap<>();
+
     private Shard(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses,
             DatastoreContext datastoreContext, SchemaContext schemaContext) {
         super(name.toString(), mapPeerAddresses(peerAddresses), Optional.of(configParams));
@@ -138,6 +142,9 @@ public class Shard extends RaftActor {
         shardMBean.setDataStoreExecutor(store.getDomStoreExecutor());
         shardMBean.setNotificationManager(store.getDataChangeListenerNotificationManager());
 
+        if (isMetricsCaptureEnabled()) {
+            getContext().become(new MeteringBehavior(this));
+        }
     }
 
     private static Map<String, String> mapPeerAddresses(
@@ -164,8 +171,11 @@ public class Shard extends RaftActor {
     }
 
     @Override public void onReceiveRecover(Object message) {
-        LOG.debug("onReceiveRecover: Received message {} from {}", message.getClass().toString(),
-            getSender());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("onReceiveRecover: Received message {} from {}",
+                message.getClass().toString(),
+                getSender());
+        }
 
         if (message instanceof RecoveryFailure){
             LOG.error(((RecoveryFailure) message).cause(), "Recovery failed because of this cause");
@@ -175,25 +185,26 @@ public class Shard extends RaftActor {
     }
 
     @Override public void onReceiveCommand(Object message) {
-        LOG.debug("onReceiveCommand: Received message {} from {}", message.getClass().toString(),
-            getSender());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("onReceiveCommand: Received message {} from {}",
+                message.getClass().toString(),
+                getSender());
+        }
 
-        if (message.getClass()
-            .equals(CreateTransactionChain.SERIALIZABLE_CLASS)) {
-            if (isLeader()) {
-                createTransactionChain();
-            } else if (getLeader() != null) {
-                getLeader().forward(message, getContext());
-            }
-        } else if(message.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
+        if(message.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
             // This must be for install snapshot. Don't want to open this up and trigger
             // deSerialization
-            self().tell(new CaptureSnapshotReply(ReadDataReply.getNormalizedNodeByteString(message)), self());
+            self()
+                .tell(new CaptureSnapshotReply(ReadDataReply.getNormalizedNodeByteString(message)),
+                    self());
 
+            createSnapshotTransaction = null;
             // Send a PoisonPill instead of sending close transaction because we do not really need
             // a response
             getSender().tell(PoisonPill.getInstance(), self());
 
+        } else if (message.getClass().equals(CloseTransactionChain.SERIALIZABLE_CLASS)){
+            closeTransactionChain(CloseTransactionChain.fromSerializable(message));
         } else if (message instanceof RegisterChangeListener) {
             registerChangeListener((RegisterChangeListener) message);
         } else if (message instanceof UpdateSchemaContext) {
@@ -206,6 +217,9 @@ public class Shard extends RaftActor {
                 createTransaction(CreateTransaction.fromSerializable(message));
             } else if (getLeader() != null) {
                 getLeader().forward(message, getContext());
+            } else {
+                getSender().tell(new akka.actor.Status.Failure(new IllegalStateException(
+                    "Could not find leader so transaction cannot be created")), getSelf());
             }
         } else if (message instanceof PeerAddressResolved) {
             PeerAddressResolved resolved = (PeerAddressResolved) message;
@@ -216,9 +230,30 @@ public class Shard extends RaftActor {
         }
     }
 
+    private void closeTransactionChain(CloseTransactionChain closeTransactionChain) {
+        DOMStoreTransactionChain chain =
+            transactionChains.remove(closeTransactionChain.getTransactionChainId());
+
+        if(chain != null) {
+            chain.close();
+        }
+    }
+
     private ActorRef createTypedTransactionActor(
         int transactionType,
-        ShardTransactionIdentifier transactionId) {
+        ShardTransactionIdentifier transactionId,
+        String transactionChainId ) {
+
+        DOMStoreTransactionFactory factory = store;
+
+        if(!transactionChainId.isEmpty()) {
+            factory = transactionChains.get(transactionChainId);
+            if(factory == null){
+                DOMStoreTransactionChain transactionChain = store.createTransactionChain();
+                transactionChains.put(transactionChainId, transactionChain);
+                factory = transactionChain;
+            }
+        }
 
         if(this.schemaContext == null){
             throw new NullPointerException("schemaContext should not be null");
@@ -230,7 +265,7 @@ public class Shard extends RaftActor {
             shardMBean.incrementReadOnlyTransactionCount();
 
             return getContext().actorOf(
-                ShardTransaction.props(store.newReadOnlyTransaction(), getSelf(),
+                ShardTransaction.props(factory.newReadOnlyTransaction(), getSelf(),
                         schemaContext,datastoreContext, shardMBean), transactionId.toString());
 
         } else if (transactionType
@@ -239,7 +274,7 @@ public class Shard extends RaftActor {
             shardMBean.incrementReadWriteTransactionCount();
 
             return getContext().actorOf(
-                ShardTransaction.props(store.newReadWriteTransaction(), getSelf(),
+                ShardTransaction.props(factory.newReadWriteTransaction(), getSelf(),
                         schemaContext, datastoreContext, shardMBean), transactionId.toString());
 
 
@@ -249,7 +284,7 @@ public class Shard extends RaftActor {
             shardMBean.incrementWriteOnlyTransactionCount();
 
             return getContext().actorOf(
-                ShardTransaction.props(store.newWriteOnlyTransaction(), getSelf(),
+                ShardTransaction.props(factory.newWriteOnlyTransaction(), getSelf(),
                         schemaContext, datastoreContext, shardMBean), transactionId.toString());
         } else {
             throw new IllegalArgumentException(
@@ -260,18 +295,20 @@ public class Shard extends RaftActor {
 
     private void createTransaction(CreateTransaction createTransaction) {
         createTransaction(createTransaction.getTransactionType(),
-            createTransaction.getTransactionId());
+            createTransaction.getTransactionId(), createTransaction.getTransactionChainId());
     }
 
-    private ActorRef createTransaction(int transactionType, String remoteTransactionId) {
+    private ActorRef createTransaction(int transactionType, String remoteTransactionId, String transactionChainId) {
 
         ShardTransactionIdentifier transactionId =
             ShardTransactionIdentifier.builder()
                 .remoteTransactionId(remoteTransactionId)
                 .build();
-        LOG.debug("Creating transaction : {} ", transactionId);
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Creating transaction : {} ", transactionId);
+        }
         ActorRef transactionActor =
-            createTypedTransactionActor(transactionType, transactionId);
+            createTypedTransactionActor(transactionType, transactionId, transactionChainId);
 
         getSender()
             .tell(new CreateTransactionReply(
@@ -296,11 +333,20 @@ public class Shard extends RaftActor {
         DOMStoreThreePhaseCommitCohort cohort =
             modificationToCohort.remove(serialized);
         if (cohort == null) {
-            LOG.debug(
-                "Could not find cohort for modification : {}. Writing modification using a new transaction",
-                modification);
+
+            if(LOG.isDebugEnabled()) {
+                LOG.debug(
+                    "Could not find cohort for modification : {}. Writing modification using a new transaction",
+                    modification);
+            }
+
             DOMStoreWriteTransaction transaction =
                 store.newWriteOnlyTransaction();
+
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Created new transaction {}", transaction.getIdentifier().toString());
+            }
+
             modification.apply(transaction);
             try {
                 syncCommitTransaction(transaction);
@@ -314,13 +360,18 @@ public class Shard extends RaftActor {
             return;
         }
 
-        final ListenableFuture<Void> future = cohort.commit();
-        final ActorRef self = getSelf();
+
+        if(sender == null){
+            LOG.error("Commit failed. Sender cannot be null");
+            return;
+        }
+
+        ListenableFuture<Void> future = cohort.commit();
 
         Futures.addCallback(future, new FutureCallback<Void>() {
             @Override
             public void onSuccess(Void v) {
-                sender.tell(new CommitTransactionReply().toSerializable(), self);
+                sender.tell(new CommitTransactionReply().toSerializable(), getSelf());
                 shardMBean.incrementCommittedTransactionCount();
                 shardMBean.setLastCommittedTransactionTime(System.currentTimeMillis());
             }
@@ -329,7 +380,7 @@ public class Shard extends RaftActor {
             public void onFailure(Throwable t) {
                 LOG.error(t, "An exception happened during commit");
                 shardMBean.incrementFailedTransactionsCount();
-                sender.tell(new akka.actor.Status.Failure(t), self);
+                sender.tell(new akka.actor.Status.Failure(t), getSelf());
             }
         });
 
@@ -363,8 +414,10 @@ public class Shard extends RaftActor {
     private void registerChangeListener(
         RegisterChangeListener registerChangeListener) {
 
-        LOG.debug("registerDataChangeListener for {}", registerChangeListener
-            .getPath());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("registerDataChangeListener for {}", registerChangeListener
+                .getPath());
+        }
 
 
         ActorSelection dataChangeListenerPath = getContext()
@@ -392,21 +445,20 @@ public class Shard extends RaftActor {
             getContext().actorOf(
                 DataChangeListenerRegistration.props(registration));
 
-        LOG.debug(
-            "registerDataChangeListener sending reply, listenerRegistrationPath = {} "
-            , listenerRegistration.path().toString());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug(
+                "registerDataChangeListener sending reply, listenerRegistrationPath = {} "
+                , listenerRegistration.path().toString());
+        }
 
         getSender()
             .tell(new RegisterChangeListenerReply(listenerRegistration.path()),
                 getSelf());
     }
 
-    private void createTransactionChain() {
-        DOMStoreTransactionChain chain = store.createTransactionChain();
-        ActorRef transactionChain = getContext().actorOf(
-                ShardTransactionChain.props(chain, schemaContext, datastoreContext, shardMBean));
-        getSender().tell(new CreateTransactionChainReply(transactionChain.path()).toSerializable(),
-            getSelf());
+    private boolean isMetricsCaptureEnabled(){
+        CommonConfig config = new CommonConfig(getContext().system().settings().config());
+        return config.isMetricCaptureEnabled();
     }
 
     @Override protected void applyState(ActorRef clientActor, String identifier,
@@ -425,7 +477,7 @@ public class Shard extends RaftActor {
             }
 
         } else {
-            LOG.error("Unknown state received {}", data);
+            LOG.error("Unknown state received {} Class loader = {} CompositeNodeMod.ClassLoader = {}",
+                data, data.getClass().getClassLoader(),
+                CompositeModificationPayload.class.getClassLoader());
         }
 
         // Update stats
@@ -448,7 +500,7 @@ public class Shard extends RaftActor {
             // so that this actor does not get blocked while building the snapshot
             createSnapshotTransaction = createTransaction(
                 TransactionProxy.TransactionType.READ_ONLY.ordinal(),
-                "createSnapshot");
+                "createSnapshot", "");
 
             createSnapshotTransaction.tell(
                 new ReadData(YangInstanceIdentifier.builder().build()).toSerializable(), self());
@@ -460,6 +512,8 @@ public class Shard extends RaftActor {
         // Since this will be done only on Recovery or when this actor is a Follower
         // we can safely commit everything in here. We do not need to worry about event notifications
         // as they would have already been disabled on the follower
+
+        LOG.info("Applying snapshot");
         try {
             DOMStoreWriteTransaction transaction = store.newWriteOnlyTransaction();
             NormalizedNodeMessages.Node serializedNode = NormalizedNodeMessages.Node.parseFrom(snapshot);
@@ -474,6 +528,8 @@ public class Shard extends RaftActor {
             syncCommitTransaction(transaction);
         } catch (InvalidProtocolBufferException | InterruptedException | ExecutionException e) {
             LOG.error(e, "An exception occurred when applying snapshot");
+        } finally {
+            LOG.info("Done applying snapshot");
         }
     }
 
@@ -483,12 +539,26 @@ public class Shard extends RaftActor {
                 .tell(new EnableNotification(isLeader()), getSelf());
         }
 
-        if (getLeaderId() != null) {
-            shardMBean.setLeader(getLeaderId());
-        }
-
         shardMBean.setRaftState(getRaftState().name());
         shardMBean.setCurrentTerm(getCurrentTerm());
+
+        // If this actor is no longer the leader close all the transaction chains
+        if(!isLeader()){
+            for(Map.Entry<String, DOMStoreTransactionChain> entry : transactionChains.entrySet()){
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug(
+                        "onStateChanged: Closing transaction chain {} because shard {} is no longer the leader",
+                        entry.getKey(), getId());
+                }
+                entry.getValue().close();
+            }
+
+            transactionChains.clear();
+        }
+    }
+
+    @Override protected void onLeaderChanged(String oldLeader, String newLeader) {
+        shardMBean.setLeader(newLeader);
     }
 
     @Override public String persistenceId() {
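
The Shard changes above replace the old CreateTransactionChain actor with a map of chain id to DOMStoreTransactionChain: a transaction carrying a non-empty chain id is created from its chain, CloseTransactionChain removes and closes the chain, and losing leadership closes every open chain. A condensed sketch of that bookkeeping, with hypothetical TxFactory/TxChain interfaces standing in for the DOMStore types, follows:

    // Sketch only: per-chain transaction factory lookup and teardown.
    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    public class ChainRegistry {

        interface TxFactory { /* newReadOnly/newReadWrite/newWriteOnly transactions */ }
        interface TxChain extends TxFactory { void close(); }

        private final TxFactory store;                      // the shard's backing store
        private final Supplier<TxChain> chainFactory;       // e.g. store::createTransactionChain
        private final Map<String, TxChain> chains = new HashMap<>();

        ChainRegistry(TxFactory store, Supplier<TxChain> chainFactory) {
            this.store = store;
            this.chainFactory = chainFactory;
        }

        // An empty chain id means "no chain": use the store directly.
        TxFactory factoryFor(String transactionChainId) {
            if (transactionChainId.isEmpty()) {
                return store;
            }
            return chains.computeIfAbsent(transactionChainId, id -> chainFactory.get());
        }

        // Mirrors the CloseTransactionChain message handling.
        void closeChain(String transactionChainId) {
            TxChain chain = chains.remove(transactionChainId);
            if (chain != null) {
                chain.close();
            }
        }

        // Mirrors onStateChanged(): a shard that loses leadership drops all open chains.
        void closeAllChains() {
            for (TxChain chain : chains.values()) {
                chain.close();
            }
            chains.clear();
        }
    }
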
index 58cdefe5371d2b58be6e7c9f5e461734f34acd07..a97c00f1d88227fb9d01c90ce38a80b8ccbb1e50 100644 (file)
@@ -18,6 +18,8 @@ import akka.cluster.ClusterEvent;
 import akka.japi.Creator;
 import akka.japi.Function;
 import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
+
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfo;
@@ -48,7 +50,7 @@ import java.util.Map;
  * <li> Monitor the cluster members and store their addresses
  * </ul>
  */
-public class ShardManager extends AbstractUntypedActor {
+public class ShardManager extends AbstractUntypedActorWithMetering {
 
     // Stores a mapping between a member name and the address of the member
     // Member names look like "member-1", "member-2" etc and are as specified
@@ -335,11 +337,11 @@ public class ShardManager extends AbstractUntypedActor {
                 peerAddress);
             if(peerAddresses.containsKey(peerId)){
                 peerAddresses.put(peerId, peerAddress);
-
-                LOG.debug(
-                    "Sending PeerAddressResolved for peer {} with address {} to {}",
-                    peerId, peerAddress, actor.path());
-
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug(
+                        "Sending PeerAddressResolved for peer {} with address {} to {}",
+                        peerId, peerAddress, actor.path());
+                }
                 actor
                     .tell(new PeerAddressResolved(peerId, peerAddress),
                         getSelf());
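
Most hunks in this change wrap LOG.debug calls in LOG.isDebugEnabled(). With SLF4J the guard matters chiefly where arguments are built eagerly (string concatenation, explicit toString(), path().toString()); for purely parameterized calls it mainly avoids the varargs allocation. A minimal illustration of the guarded form used throughout, shown with an assumed example message:

    // Sketch only: guarded debug logging so the message is built only when needed.
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardedLogging {
        private static final Logger LOG = LoggerFactory.getLogger(GuardedLogging.class);

        void example(Object path) {
            if (LOG.isDebugEnabled()) {
                // The concatenation below only runs when debug logging is enabled.
                LOG.debug("writeData at path : " + path.toString());
            }
        }
    }
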
index 65f865b0c43ecdd6da13754605bccdc91a6f472e..f5ca6e3c5aa2334eb26c52408dd7279f08e33d3a 100644 (file)
@@ -16,6 +16,8 @@ import akka.japi.Creator;
 
 import com.google.common.base.Optional;
 import com.google.common.util.concurrent.CheckedFuture;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+
 
 import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
@@ -103,7 +105,9 @@ public abstract class ShardTransaction extends AbstractUntypedActor {
             getSender().tell(new GetCompositeModificationReply(
                     new ImmutableCompositeModification(modification)), getSelf());
         } else if (message instanceof ReceiveTimeout) {
-            LOG.debug("Got ReceiveTimeout for inactivity - closing Tx");
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Got ReceiveTimeout for inactivity - closing Tx");
+            }
             closeTransaction(false);
         } else {
             throw new UnknownMessageException(message);
@@ -161,8 +165,9 @@ public abstract class ShardTransaction extends AbstractUntypedActor {
     protected void writeData(DOMStoreWriteTransaction transaction, WriteData message) {
         modification.addModification(
                 new WriteModification(message.getPath(), message.getData(),schemaContext));
-        LOG.debug("writeData at path : " + message.getPath().toString());
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("writeData at path : " + message.getPath().toString());
+        }
         try {
             transaction.write(message.getPath(), message.getData());
             getSender().tell(new WriteDataReply().toSerializable(), getSelf());
@@ -174,7 +179,9 @@ public abstract class ShardTransaction extends AbstractUntypedActor {
     protected void mergeData(DOMStoreWriteTransaction transaction, MergeData message) {
         modification.addModification(
                 new MergeModification(message.getPath(), message.getData(), schemaContext));
-        LOG.debug("mergeData at path : " + message.getPath().toString());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("mergeData at path : " + message.getPath().toString());
+        }
         try {
             transaction.merge(message.getPath(), message.getData());
             getSender().tell(new MergeDataReply().toSerializable(), getSelf());
@@ -184,7 +191,9 @@ public abstract class ShardTransaction extends AbstractUntypedActor {
     }
 
     protected void deleteData(DOMStoreWriteTransaction transaction, DeleteData message) {
-        LOG.debug("deleteData at path : " + message.getPath().toString());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("deleteData at path : " + message.getPath().toString());
+        }
         modification.addModification(new DeleteModification(message.getPath()));
         try {
             transaction.delete(message.getPath());
index 484bd54a0743616ebb3fdb3bd95f0c1c253b1996..8fe94cf468b6b63e78128a5f44ca2b1b7cebdd55 100644 (file)
@@ -11,6 +11,7 @@ package org.opendaylight.controller.cluster.datastore;
 import akka.actor.ActorRef;
 import akka.actor.Props;
 import akka.japi.Creator;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
index e6ac7f8dbc368665d9cb13d623e531533f51e04a..0c3d33a78c4801002571e2b5dc0ab02f8ddae971 100644 (file)
@@ -25,7 +25,9 @@ public class TerminationMonitor extends UntypedActor{
     @Override public void onReceive(Object message) throws Exception {
         if(message instanceof Terminated){
             Terminated terminated = (Terminated) message;
-            LOG.debug("Actor terminated : {}", terminated.actor());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Actor terminated : {}", terminated.actor());
+            }
         } else if(message instanceof Monitor){
             Monitor monitor = (Monitor) message;
             getContext().watch(monitor.getActorRef());
index 2dce6a1079c4fdbb0a8e2fa090fa018908d3f5ce..df85bb136a93b51084676a39cb1d7b53b54b7037 100644 (file)
@@ -18,6 +18,7 @@ import akka.japi.Creator;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
@@ -100,7 +101,9 @@ public class ThreePhaseCommitCohort extends AbstractUntypedActor {
 
     private void commit(CommitTransaction message) {
         // Forward the commit to the shard
-        log.debug("Forward commit transaction to Shard {} ", shardActor);
+        if(log.isDebugEnabled()) {
+            log.debug("Forward commit transaction to Shard {} ", shardActor);
+        }
         shardActor.forward(new ForwardedCommitTransaction(cohort, modification),
             getContext());
 
index a5be69531d73ede89f7bba5978a85d2e045d8989..a7a5b31b174e4e0d03db192aa367a43ebe67ad62 100644 (file)
@@ -65,9 +65,10 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
             @Override
             public Void apply(Iterable<ActorPath> paths) {
                 cohortPaths = Lists.newArrayList(paths);
-
-                LOG.debug("Tx {} successfully built cohort path list: {}",
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Tx {} successfully built cohort path list: {}",
                         transactionId, cohortPaths);
+                }
                 return null;
             }
         }, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getActorSystem().dispatcher());
@@ -75,8 +76,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
 
     @Override
     public ListenableFuture<Boolean> canCommit() {
-        LOG.debug("Tx {} canCommit", transactionId);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} canCommit", transactionId);
+        }
         final SettableFuture<Boolean> returnFuture = SettableFuture.create();
 
         // The first phase of canCommit is to gather the list of cohort actor paths that will
@@ -89,7 +91,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
             @Override
             public void onComplete(Throwable failure, Void notUsed) throws Throwable {
                 if(failure != null) {
-                    LOG.debug("Tx {}: a cohort path Future failed: {}", transactionId, failure);
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("Tx {}: a cohort path Future failed: {}", transactionId, failure);
+                    }
                     returnFuture.setException(failure);
                 } else {
                     finishCanCommit(returnFuture);
@@ -101,9 +105,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
     }
 
     private void finishCanCommit(final SettableFuture<Boolean> returnFuture) {
-
-        LOG.debug("Tx {} finishCanCommit", transactionId);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} finishCanCommit", transactionId);
+        }
         // The last phase of canCommit is to invoke all the cohort actors asynchronously to perform
         // their canCommit processing. If any one fails then we'll fail canCommit.
 
@@ -114,7 +118,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
             @Override
             public void onComplete(Throwable failure, Iterable<Object> responses) throws Throwable {
                 if(failure != null) {
-                    LOG.debug("Tx {}: a canCommit cohort Future failed: {}", transactionId, failure);
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("Tx {}: a canCommit cohort Future failed: {}", transactionId, failure);
+                    }
                     returnFuture.setException(failure);
                     return;
                 }
@@ -135,9 +141,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
                         return;
                     }
                 }
-
-                LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result);
-
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Tx {}: canCommit returning result: {}", transactionId, result);
+                }
                 returnFuture.set(Boolean.valueOf(result));
             }
         }, actorContext.getActorSystem().dispatcher());
@@ -146,9 +152,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
     private Future<Iterable<Object>> invokeCohorts(Object message) {
         List<Future<Object>> futureList = Lists.newArrayListWithCapacity(cohortPaths.size());
         for(ActorPath actorPath : cohortPaths) {
-
-            LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, actorPath);
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {}: Sending {} to cohort {}", transactionId, message, actorPath);
+            }
             ActorSelection cohort = actorContext.actorSelection(actorPath);
 
             futureList.add(actorContext.executeRemoteOperationAsync(cohort, message));
@@ -184,8 +190,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
     private ListenableFuture<Void> voidOperation(final String operationName, final Object message,
             final Class<?> expectedResponseClass, final boolean propagateException) {
 
-        LOG.debug("Tx {} {}", transactionId, operationName);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} {}", transactionId, operationName);
+        }
         final SettableFuture<Void> returnFuture = SettableFuture.create();
 
         // The cohort actor list should already be built at this point by the canCommit phase but,
@@ -199,9 +206,10 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
                 @Override
                 public void onComplete(Throwable failure, Void notUsed) throws Throwable {
                     if(failure != null) {
-                        LOG.debug("Tx {}: a {} cohort path Future failed: {}", transactionId,
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug("Tx {}: a {} cohort path Future failed: {}", transactionId,
                                 operationName, failure);
-
+                        }
                         if(propagateException) {
                             returnFuture.setException(failure);
                         } else {
@@ -221,9 +229,9 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
     private void finishVoidOperation(final String operationName, final Object message,
             final Class<?> expectedResponseClass, final boolean propagateException,
             final SettableFuture<Void> returnFuture) {
-
-        LOG.debug("Tx {} finish {}", transactionId, operationName);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} finish {}", transactionId, operationName);
+        }
         Future<Iterable<Object>> combinedFuture = invokeCohorts(message);
 
         combinedFuture.onComplete(new OnComplete<Iterable<Object>>() {
@@ -243,9 +251,10 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
                 }
 
                 if(exceptionToPropagate != null) {
-                    LOG.debug("Tx {}: a {} cohort Future failed: {}", transactionId,
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("Tx {}: a {} cohort Future failed: {}", transactionId,
                             operationName, exceptionToPropagate);
-
+                    }
                     if(propagateException) {
                         // We don't log the exception here to avoid redundant logging since we're
                         // propagating to the caller in MD-SAL core who will log it.
@@ -254,12 +263,16 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
                         // Since the caller doesn't want us to propagate the exception we'll also
                         // not log it normally. But it's usually not good to totally silence
                         // exceptions so we'll log it to debug level.
-                        LOG.debug(String.format("%s failed",  message.getClass().getSimpleName()),
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug(String.format("%s failed", message.getClass().getSimpleName()),
                                 exceptionToPropagate);
+                        }
                         returnFuture.set(null);
                     }
                 } else {
-                    LOG.debug("Tx {}: {} succeeded", transactionId, operationName);
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("Tx {}: {} succeeded", transactionId, operationName);
+                    }
                     returnFuture.set(null);
                 }
             }
index 9b4610a99c4a6e5977115f560d12b551131e9be5..b74c89d727c2a1761a7c993272abcab1e4ad9999 100644 (file)
@@ -8,43 +8,72 @@
 
 package org.opendaylight.controller.cluster.datastore;
 
+import akka.actor.ActorPath;
+import akka.dispatch.Futures;
+import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+
+import java.util.Collections;
+import java.util.List;
 
 /**
  * TransactionChainProxy acts as a proxy for a DOMStoreTransactionChain created on a remote shard
  */
 public class TransactionChainProxy implements DOMStoreTransactionChain{
     private final ActorContext actorContext;
+    private final String transactionChainId;
+    private volatile List<Future<ActorPath>> cohortPathFutures = Collections.emptyList();
 
     public TransactionChainProxy(ActorContext actorContext) {
         this.actorContext = actorContext;
+        transactionChainId = actorContext.getCurrentMemberName() + "-" + System.currentTimeMillis();
     }
 
     @Override
     public DOMStoreReadTransaction newReadOnlyTransaction() {
         return new TransactionProxy(actorContext,
-            TransactionProxy.TransactionType.READ_ONLY);
+            TransactionProxy.TransactionType.READ_ONLY, this);
     }
 
     @Override
     public DOMStoreReadWriteTransaction newReadWriteTransaction() {
         return new TransactionProxy(actorContext,
-            TransactionProxy.TransactionType.READ_WRITE);
+            TransactionProxy.TransactionType.READ_WRITE, this);
     }
 
     @Override
     public DOMStoreWriteTransaction newWriteOnlyTransaction() {
         return new TransactionProxy(actorContext,
-            TransactionProxy.TransactionType.WRITE_ONLY);
+            TransactionProxy.TransactionType.WRITE_ONLY, this);
     }
 
     @Override
     public void close() {
-        // FIXME : The problem here is don't know which shard the transaction chain is to be created on ???
-        throw new UnsupportedOperationException("close - not sure what to do here?");
+        // Send a close transaction chain request to each and every shard
+        actorContext.broadcast(new CloseTransactionChain(transactionChainId));
+    }
+
+    public String getTransactionChainId() {
+        return transactionChainId;
+    }
+
+    public void onTransactionReady(List<Future<ActorPath>> cohortPathFutures){
+        this.cohortPathFutures = cohortPathFutures;
+    }
+
+    public void waitTillCurrentTransactionReady(){
+        try {
+            Await.result(Futures
+                .sequence(this.cohortPathFutures, actorContext.getActorSystem().dispatcher()),
+                actorContext.getOperationDuration());
+        } catch (Exception e) {
+            throw new IllegalStateException("Failed when waiting for transaction on a chain to become ready", e);
+        }
     }
 }
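
TransactionChainProxy above enforces chain ordering on the client side: each readied transaction hands its cohort-path futures to the chain, and the next transaction waits on them before contacting any shard; close() simply broadcasts CloseTransactionChain to every shard. A sketch of the ordering part, assuming CompletableFuture in place of the Scala futures and Await used above:

    // Sketch only: wait for the previous transaction in a chain to become ready.
    import java.util.Collections;
    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class ChainOrdering {

        // Futures recorded when the previous transaction in the chain was readied.
        private volatile List<CompletableFuture<String>> previousReadyFutures = Collections.emptyList();

        public void onTransactionReady(List<CompletableFuture<String>> readyFutures) {
            this.previousReadyFutures = readyFutures;
        }

        // Called before a new transaction in the chain talks to a shard.
        public void waitTillCurrentTransactionReady(long timeout, TimeUnit unit) {
            try {
                CompletableFuture.allOf(previousReadyFutures.toArray(new CompletableFuture[0]))
                    .get(timeout, unit);
            } catch (InterruptedException | ExecutionException | TimeoutException e) {
                throw new IllegalStateException(
                    "Failed when waiting for transaction on a chain to become ready", e);
            }
        }
    }

If any ready future fails or the wait times out, the chain refuses to create the next transaction, mirroring the IllegalStateException thrown above.
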
index a8b20c030e1dd34f274ce2e02b56669739824af3..6cf16b44268c6c16e26e0658632f61994ee33971 100644 (file)
@@ -71,6 +71,11 @@ import java.util.concurrent.atomic.AtomicLong;
  * </p>
  */
 public class TransactionProxy implements DOMStoreReadWriteTransaction {
+
+    private final TransactionChainProxy transactionChainProxy;
+
     public enum TransactionType {
         READ_ONLY,
         WRITE_ONLY,
@@ -177,12 +182,27 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
     private boolean inReadyState;
 
     public TransactionProxy(ActorContext actorContext, TransactionType transactionType) {
+        this(actorContext, transactionType, null);
+    }
+
+    @VisibleForTesting
+    List<Future<Object>> getRecordedOperationFutures() {
+        List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
+        for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
+            recordedOperationFutures.addAll(transactionContext.getRecordedOperationFutures());
+        }
+
+        return recordedOperationFutures;
+    }
+
+    public TransactionProxy(ActorContext actorContext, TransactionType transactionType, TransactionChainProxy transactionChainProxy) {
         this.actorContext = Preconditions.checkNotNull(actorContext,
-                "actorContext should not be null");
+            "actorContext should not be null");
         this.transactionType = Preconditions.checkNotNull(transactionType,
-                "transactionType should not be null");
+            "transactionType should not be null");
         this.schemaContext = Preconditions.checkNotNull(actorContext.getSchemaContext(),
-                "schemaContext should not be null");
+            "schemaContext should not be null");
+        this.transactionChainProxy = transactionChainProxy;
 
         String memberName = actorContext.getCurrentMemberName();
         if(memberName == null){
@@ -190,7 +210,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         }
 
         this.identifier = TransactionIdentifier.builder().memberName(memberName).counter(
-                counter.getAndIncrement()).build();
+            counter.getAndIncrement()).build();
 
         if(transactionType == TransactionType.READ_ONLY) {
             // Read-only Tx's aren't explicitly closed by the client so we create a PhantomReference
@@ -201,21 +221,12 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             remoteTransactionActorsMB = new AtomicBoolean();
 
             TransactionProxyCleanupPhantomReference cleanup =
-                                              new TransactionProxyCleanupPhantomReference(this);
+                new TransactionProxyCleanupPhantomReference(this);
             phantomReferenceCache.put(cleanup, cleanup);
         }
-
-        LOG.debug("Created txn {} of type {}", identifier, transactionType);
-    }
-
-    @VisibleForTesting
-    List<Future<Object>> getRecordedOperationFutures() {
-        List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
-        for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
-            recordedOperationFutures.addAll(transactionContext.getRecordedOperationFutures());
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Created txn {} of type {}", identifier, transactionType);
         }
-
-        return recordedOperationFutures;
     }
 
     @Override
@@ -225,8 +236,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
                 "Read operation on write-only transaction is not allowed");
 
-        LOG.debug("Tx {} read {}", identifier, path);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} read {}", identifier, path);
+        }
         createTransactionIfMissing(actorContext, path);
 
         return transactionContext(path).readData(path);
@@ -238,8 +250,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
                 "Exists operation on write-only transaction is not allowed");
 
-        LOG.debug("Tx {} exists {}", identifier, path);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} exists {}", identifier, path);
+        }
         createTransactionIfMissing(actorContext, path);
 
         return transactionContext(path).dataExists(path);
@@ -257,8 +270,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         checkModificationState();
 
-        LOG.debug("Tx {} write {}", identifier, path);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} write {}", identifier, path);
+        }
         createTransactionIfMissing(actorContext, path);
 
         transactionContext(path).writeData(path, data);
@@ -269,8 +283,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         checkModificationState();
 
-        LOG.debug("Tx {} merge {}", identifier, path);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} merge {}", identifier, path);
+        }
         createTransactionIfMissing(actorContext, path);
 
         transactionContext(path).mergeData(path, data);
@@ -280,9 +295,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
     public void delete(YangInstanceIdentifier path) {
 
         checkModificationState();
-
-        LOG.debug("Tx {} delete {}", identifier, path);
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} delete {}", identifier, path);
+        }
         createTransactionIfMissing(actorContext, path);
 
         transactionContext(path).deleteData(path);
@@ -295,19 +310,25 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         inReadyState = true;
 
-        LOG.debug("Tx {} Trying to get {} transactions ready for commit", identifier,
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Tx {} Trying to get {} transactions ready for commit", identifier,
                 remoteTransactionPaths.size());
-
+        }
         List<Future<ActorPath>> cohortPathFutures = Lists.newArrayList();
 
         for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
 
-            LOG.debug("Tx {} Readying transaction for shard {}", identifier,
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} Readying transaction for shard {}", identifier,
                     transactionContext.getShardName());
-
+            }
             cohortPathFutures.add(transactionContext.readyTransaction());
         }
 
+        if(transactionChainProxy != null){
+            transactionChainProxy.onTransactionReady(cohortPathFutures);
+        }
+
         return new ThreePhaseCommitCohortProxy(actorContext, cohortPathFutures,
                 identifier.toString());
     }
@@ -340,31 +361,39 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         return ShardStrategyFactory.getStrategy(path).findShard(path);
     }
 
-    private void createTransactionIfMissing(ActorContext actorContext, YangInstanceIdentifier path) {
+    private void createTransactionIfMissing(ActorContext actorContext,
+        YangInstanceIdentifier path) {
+
+        if(transactionChainProxy != null){
+            transactionChainProxy.waitTillCurrentTransactionReady();
+        }
+
         String shardName = ShardStrategyFactory.getStrategy(path).findShard(path);
 
         TransactionContext transactionContext =
             remoteTransactionPaths.get(shardName);
 
-        if(transactionContext != null){
+        if (transactionContext != null) {
             // A transaction already exists with that shard
             return;
         }
 
         try {
             Object response = actorContext.executeShardOperation(shardName,
-                new CreateTransaction(identifier.toString(),this.transactionType.ordinal() ).toSerializable());
+                new CreateTransaction(identifier.toString(), this.transactionType.ordinal(),
+                    getTransactionChainId()).toSerializable());
             if (response.getClass().equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
                 CreateTransactionReply reply =
                     CreateTransactionReply.fromSerializable(response);
 
                 String transactionPath = reply.getTransactionPath();
 
-                LOG.debug("Tx {} Received transaction path = {}", identifier, transactionPath);
-
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Tx {} Received transaction path = {}", identifier, transactionPath);
+                }
                 ActorSelection transactionActor = actorContext.actorSelection(transactionPath);
 
-                if(transactionType == TransactionType.READ_ONLY) {
+                if (transactionType == TransactionType.READ_ONLY) {
                     // Add the actor to the remoteTransactionActors list for access by the
                     // cleanup PhantonReference.
                     remoteTransactionActors.add(transactionActor);
@@ -375,19 +404,30 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
                 }
 
                 transactionContext = new TransactionContextImpl(shardName, transactionPath,
-                        transactionActor, identifier, actorContext, schemaContext);
+                    transactionActor, identifier, actorContext, schemaContext);
 
                 remoteTransactionPaths.put(shardName, transactionContext);
             } else {
                 throw new IllegalArgumentException(String.format(
-                        "Invalid reply type {} for CreateTransaction", response.getClass()));
+                    "Invalid reply type %s for CreateTransaction", response.getClass()));
             }
-        } catch(Exception e){
-            LOG.debug("Tx {} Creating NoOpTransaction because of : {}", identifier, e.getMessage());
-            remoteTransactionPaths.put(shardName, new NoOpTransactionContext(shardName, e, identifier));
+        } catch (Exception e) {
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} Creating NoOpTransaction because of : {}", identifier, e.getMessage());
+            }
+            remoteTransactionPaths
+                .put(shardName, new NoOpTransactionContext(shardName, e, identifier));
+        }
+    }
+
+    public String getTransactionChainId() {
+        if(transactionChainProxy == null){
+            return "";
         }
+        return transactionChainProxy.getTransactionChainId();
     }
 
+
     private interface TransactionContext {
         String getShardName();
 
@@ -459,15 +499,18 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         @Override
         public void closeTransaction() {
-            LOG.debug("Tx {} closeTransaction called", identifier);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} closeTransaction called", identifier);
+            }
             actorContext.sendRemoteOperationAsync(getActor(), new CloseTransaction().toSerializable());
         }
 
         @Override
         public Future<ActorPath> readyTransaction() {
-            LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
                     identifier, recordedOperationFutures.size());
-
+            }
             // Send the ReadyTransaction message to the Tx actor.
 
             final Future<Object> replyFuture = actorContext.executeRemoteOperationAsync(getActor(),
@@ -492,10 +535,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             return combinedFutures.transform(new AbstractFunction1<Iterable<Object>, ActorPath>() {
                 @Override
                 public ActorPath apply(Iterable<Object> notUsed) {
-
-                    LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
+                    if(LOG.isDebugEnabled()) {
+                        LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
                             identifier);
-
+                    }
                     // At this point all the Futures succeeded and we need to extract the cohort
                     // actor path from the ReadyTransactionReply. For the recorded operations, they
                     // don't return any data so we're only interested that they completed
@@ -513,9 +556,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
                         String resolvedCohortPath = getResolvedCohortPath(
                                 reply.getCohortPath().toString());
 
-                        LOG.debug("Tx {} readyTransaction: resolved cohort path {}",
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug("Tx {} readyTransaction: resolved cohort path {}",
                                 identifier, resolvedCohortPath);
-
+                        }
                         return actorContext.actorFor(resolvedCohortPath);
                     } else {
                         // Throwing an exception here will fail the Future.
@@ -529,21 +573,27 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         @Override
         public void deleteData(YangInstanceIdentifier path) {
-            LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+            }
             recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
                     new DeleteData(path).toSerializable() ));
         }
 
         @Override
         public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-            LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+            }
             recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
                     new MergeData(path, data, schemaContext).toSerializable()));
         }
 
         @Override
         public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-            LOG.debug("Tx {} writeData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} writeData called path = {}", identifier, path);
+            }
             recordedOperationFutures.add(actorContext.executeRemoteOperationAsync(getActor(),
                     new WriteData(path, data, schemaContext).toSerializable()));
         }
@@ -552,8 +602,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
                 final YangInstanceIdentifier path) {
 
-            LOG.debug("Tx {} readData called path = {}", identifier, path);
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} readData called path = {}", identifier, path);
+            }
             final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture = SettableFuture.create();
 
             // If there were any previous recorded put/merge/delete operation reply Futures then we
@@ -563,9 +614,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             if(recordedOperationFutures.isEmpty()) {
                 finishReadData(path, returnFuture);
             } else {
-                LOG.debug("Tx {} readData: verifying {} previous recorded operations",
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Tx {} readData: verifying {} previous recorded operations",
                         identifier, recordedOperationFutures.size());
-
+                }
                 // Note: we make a copy of recordedOperationFutures to be on the safe side in case
                 // Futures#sequence accesses the passed List on a different thread, as
                 // recordedOperationFutures is not synchronized.
@@ -578,9 +630,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
                     public void onComplete(Throwable failure, Iterable<Object> notUsed)
                             throws Throwable {
                         if(failure != null) {
-                            LOG.debug("Tx {} readData: a recorded operation failed: {}",
+                            if(LOG.isDebugEnabled()) {
+                                LOG.debug("Tx {} readData: a recorded operation failed: {}",
                                     identifier, failure);
-
+                            }
                             returnFuture.setException(new ReadFailedException(
                                     "The read could not be performed because a previous put, merge,"
                                     + "or delete operation failed", failure));
@@ -599,20 +652,23 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         private void finishReadData(final YangInstanceIdentifier path,
                 final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture) {
 
-            LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
+            }
             OnComplete<Object> onComplete = new OnComplete<Object>() {
                 @Override
                 public void onComplete(Throwable failure, Object readResponse) throws Throwable {
                     if(failure != null) {
-                        LOG.debug("Tx {} read operation failed: {}", identifier, failure);
-
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug("Tx {} read operation failed: {}", identifier, failure);
+                        }
                         returnFuture.setException(new ReadFailedException(
                                 "Error reading data for path " + path, failure));
 
                     } else {
-                        LOG.debug("Tx {} read operation succeeded", identifier, failure);
-
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug("Tx {} read operation succeeded", identifier);
+                        }
                         if (readResponse.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
                             ReadDataReply reply = ReadDataReply.fromSerializable(schemaContext,
                                     path, readResponse);
@@ -639,8 +695,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         public CheckedFuture<Boolean, ReadFailedException> dataExists(
                 final YangInstanceIdentifier path) {
 
-            LOG.debug("Tx {} dataExists called path = {}", identifier, path);
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+            }
             final SettableFuture<Boolean> returnFuture = SettableFuture.create();
 
             // If there were any previous recorded put/merge/delete operation reply Futures then we
@@ -651,9 +708,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             if(recordedOperationFutures.isEmpty()) {
                 finishDataExists(path, returnFuture);
             } else {
-                LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
+                if(LOG.isDebugEnabled()) {
+                    LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
                         identifier, recordedOperationFutures.size());
-
+                }
                 // Note: we make a copy of recordedOperationFutures to be on the safe side in case
                 // Futures#sequence accesses the passed List on a different thread, as
                 // recordedOperationFutures is not synchronized.
@@ -666,9 +724,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
                     public void onComplete(Throwable failure, Iterable<Object> notUsed)
                             throws Throwable {
                         if(failure != null) {
-                            LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
+                            if(LOG.isDebugEnabled()) {
+                                LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
                                     identifier, failure);
-
+                            }
                             returnFuture.setException(new ReadFailedException(
                                     "The data exists could not be performed because a previous "
                                     + "put, merge, or delete operation failed", failure));
@@ -687,19 +746,22 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         private void finishDataExists(final YangInstanceIdentifier path,
                 final SettableFuture<Boolean> returnFuture) {
 
-            LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
+            }
             OnComplete<Object> onComplete = new OnComplete<Object>() {
                 @Override
                 public void onComplete(Throwable failure, Object response) throws Throwable {
                     if(failure != null) {
-                        LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
-
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
+                        }
                         returnFuture.setException(new ReadFailedException(
                                 "Error checking data exists for path " + path, failure));
                     } else {
-                        LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
-
+                        if(LOG.isDebugEnabled()) {
+                            LOG.debug("Tx {} dataExists operation succeeded", identifier);
+                        }
                         if (response.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
                             returnFuture.set(Boolean.valueOf(DataExistsReply.
                                         fromSerializable(response).exists()));
@@ -731,34 +793,46 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         @Override
         public void closeTransaction() {
-            LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
+            }
         }
 
         @Override
         public Future<ActorPath> readyTransaction() {
-            LOG.debug("Tx {} readyTransaction called", identifier);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} readyTransaction called", identifier);
+            }
             return akka.dispatch.Futures.failed(failure);
         }
 
         @Override
         public void deleteData(YangInstanceIdentifier path) {
-            LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+            }
         }
 
         @Override
         public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-            LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+            }
         }
 
         @Override
         public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-            LOG.debug("Tx {} writeData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} writeData called path = {}", identifier, path);
+            }
         }
 
         @Override
         public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
             YangInstanceIdentifier path) {
-            LOG.debug("Tx {} readData called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} readData called path = {}", identifier, path);
+            }
             return Futures.immediateFailedCheckedFuture(new ReadFailedException(
                     "Error reading data for path " + path, failure));
         }
@@ -766,7 +840,9 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         @Override
         public CheckedFuture<Boolean, ReadFailedException> dataExists(
             YangInstanceIdentifier path) {
-            LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+            }
             return Futures.immediateFailedCheckedFuture(new ReadFailedException(
                     "Error checking exists for path " + path, failure));
         }
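
The bulk of the TransactionProxy changes above wrap existing parameterized debug statements in an isDebugEnabled() guard so that the argument list is not built when debug logging is off. A minimal sketch of the pattern, reusing the identifier and path names from the hunks above:

    // Guard keeps the arguments from being evaluated when debug logging is disabled.
    if (LOG.isDebugEnabled()) {
        LOG.debug("Tx {} readData called path = {}", identifier, path);
    }
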
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/AbstractBaseMBean.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/AbstractBaseMBean.java
deleted file mode 100644 (file)
index e69de29..0000000
index 0a1964b0533bfc7ead91025e5792f5edda85b844..0959c2a95949a6332fd66859f0796e103746c5d8 100644 (file)
@@ -74,16 +74,14 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
     }
 
     public void setDataStoreExecutor(ExecutorService dsExecutor) {
-        this.dataStoreExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(dsExecutor,
-                "notification-executor", getMBeanType(), getMBeanCategory());
+        this.dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dsExecutor);
     }
 
     public void setNotificationManager(QueuedNotificationManager<?, ?> manager) {
         this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
                 "notification-manager", getMBeanType(), getMBeanCategory());
 
-        this.notificationExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(manager.getExecutor(),
-                "data-store-executor", getMBeanType(), getMBeanCategory());
+        this.notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor());
     }
 
     @Override
@@ -230,7 +228,8 @@ public class ShardStats extends AbstractMXBean implements ShardStatsMXBean {
 
     @Override
     public ThreadExecutorStats getDataStoreExecutorStats() {
-        return dataStoreExecutorStatsBean.toThreadExecutorStats();
+        return dataStoreExecutorStatsBean == null ? null
+                : dataStoreExecutorStatsBean.toThreadExecutorStats();
     }
 
     @Override
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStatsMBean.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStatsMBean.java
deleted file mode 100644 (file)
index e69de29..0000000
index efa51fde2090c3762f8d32e5d75b2a4a50c77757..74de6c5aeacd3a39e777512980745d9190d7838c 100644 (file)
@@ -10,10 +10,29 @@ package org.opendaylight.controller.cluster.datastore.messages;
 
 import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages;
 
-public class CloseTransactionChain implements SerializableMessage{
-  public static final Class SERIALIZABLE_CLASS = ShardTransactionChainMessages.CloseTransactionChain.class;
-  @Override
-  public Object toSerializable() {
-    return ShardTransactionChainMessages.CloseTransactionChain.newBuilder().build();
-  }
+public class CloseTransactionChain implements SerializableMessage {
+    public static final Class SERIALIZABLE_CLASS =
+        ShardTransactionChainMessages.CloseTransactionChain.class;
+    private final String transactionChainId;
+
+    public CloseTransactionChain(String transactionChainId){
+        this.transactionChainId = transactionChainId;
+    }
+
+    @Override
+    public Object toSerializable() {
+        return ShardTransactionChainMessages.CloseTransactionChain.newBuilder()
+            .setTransactionChainId(transactionChainId).build();
+    }
+
+    public static CloseTransactionChain fromSerializable(Object message){
+        ShardTransactionChainMessages.CloseTransactionChain closeTransactionChain
+            = (ShardTransactionChainMessages.CloseTransactionChain) message;
+
+        return new CloseTransactionChain(closeTransactionChain.getTransactionChainId());
+    }
+
+    public String getTransactionChainId() {
+        return transactionChainId;
+    }
 }
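
For illustration, a sketch of how the reworked CloseTransactionChain message round-trips the new chain id (the "chain-1" value is an example only):

    CloseTransactionChain close = new CloseTransactionChain("chain-1");
    Object serialized = close.toSerializable();

    // Rebuild the message on the receiving side; getTransactionChainId() yields "chain-1".
    CloseTransactionChain restored = CloseTransactionChain.fromSerializable(serialized);
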
index d5c9e21611af20df37bb1999d00ead9a44495fda..361d406ac80dda74fe33950ace971ece600b3d7a 100644 (file)
@@ -13,30 +13,48 @@ import org.opendaylight.controller.protobuff.messages.transaction.ShardTransacti
 
 
 public class CreateTransaction implements SerializableMessage {
-  public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.CreateTransaction.class;
-  private final String transactionId;
-  private final int transactionType;
-
-  public CreateTransaction(String transactionId, int transactionType){
-
-    this.transactionId = transactionId;
-    this.transactionType = transactionType;
-  }
-
-  public String getTransactionId() {
-    return transactionId;
-  }
-
-  public int getTransactionType() { return transactionType;}
-
-  @Override
-  public Object toSerializable() {
-    return  ShardTransactionMessages.CreateTransaction.newBuilder().setTransactionId(transactionId).setTransactionType(transactionType).build();
-  }
-
-  public static CreateTransaction fromSerializable(Object message){
-    ShardTransactionMessages.CreateTransaction createTransaction = (ShardTransactionMessages.CreateTransaction)message;
-    return new CreateTransaction(createTransaction.getTransactionId(),createTransaction.getTransactionType() );
-  }
-
+    public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.CreateTransaction.class;
+    private final String transactionId;
+    private final int transactionType;
+    private final String transactionChainId;
+
+    public CreateTransaction(String transactionId, int transactionType) {
+        this(transactionId, transactionType, "");
+    }
+
+    public CreateTransaction(String transactionId, int transactionType, String transactionChainId) {
+        this.transactionId = transactionId;
+        this.transactionType = transactionType;
+        this.transactionChainId = transactionChainId;
+    }
+
+    public String getTransactionId() {
+        return transactionId;
+    }
+
+    public int getTransactionType() {
+        return transactionType;
+    }
+
+    @Override
+    public Object toSerializable() {
+        return ShardTransactionMessages.CreateTransaction.newBuilder()
+            .setTransactionId(transactionId)
+            .setTransactionType(transactionType)
+            .setTransactionChainId(transactionChainId).build();
+    }
+
+    public static CreateTransaction fromSerializable(Object message) {
+        ShardTransactionMessages.CreateTransaction createTransaction =
+            (ShardTransactionMessages.CreateTransaction) message;
+        return new CreateTransaction(createTransaction.getTransactionId(),
+            createTransaction.getTransactionType(), createTransaction.getTransactionChainId());
+    }
+
+    public String getTransactionChainId() {
+        return transactionChainId;
+    }
 }
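
A short sketch of the two constructors CreateTransaction now offers (transaction ids and the chain id are example values; the two-argument form defaults the chain id to an empty string):

    // Standalone transaction: chain id defaults to "".
    CreateTransaction standalone = new CreateTransaction("txn-1",
            TransactionProxy.TransactionType.READ_ONLY.ordinal());

    // Chained transaction: the chain id is carried through the serialized message.
    CreateTransaction chained = new CreateTransaction("txn-2",
            TransactionProxy.TransactionType.WRITE_ONLY.ordinal(), "chain-1");
    CreateTransaction restored = CreateTransaction.fromSerializable(chained.toSerializable());
    // restored.getTransactionChainId() yields "chain-1".
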
index b87dc4f608b191c21d5fa34b8bf4a9744864f96a..8ba333d2799a5177c7b1b8b5b09ab6b4ec87d126 100644 (file)
@@ -13,8 +13,8 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
 import akka.actor.PoisonPill;
+import akka.pattern.Patterns;
 import akka.util.Timeout;
-
 import org.opendaylight.controller.cluster.datastore.ClusterWrapper;
 import org.opendaylight.controller.cluster.datastore.Configuration;
 import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
@@ -27,7 +27,6 @@ import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContex
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 import scala.concurrent.duration.Duration;
@@ -126,8 +125,9 @@ public class ActorContext {
         if (result instanceof LocalShardFound) {
             LocalShardFound found = (LocalShardFound) result;
 
-            LOG.debug("Local shard found {}", found.getPath());
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Local shard found {}", found.getPath());
+            }
             return found.getPath();
         }
 
@@ -142,8 +142,9 @@ public class ActorContext {
         if (result.getClass().equals(PrimaryFound.SERIALIZABLE_CLASS)) {
             PrimaryFound found = PrimaryFound.fromSerializable(result);
 
-            LOG.debug("Primary found {}", found.getPrimaryPath());
-
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Primary found {}", found.getPrimaryPath());
+            }
             return found.getPrimaryPath();
         }
         throw new PrimaryNotFoundException("Could not find primary for shardName " + shardName);
@@ -176,8 +177,10 @@ public class ActorContext {
      */
     public Object executeRemoteOperation(ActorSelection actor, Object message) {
 
-        LOG.debug("Sending remote message {} to {}", message.getClass().toString(), actor.toString());
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Sending remote message {} to {}", message.getClass().toString(),
+                actor.toString());
+        }
         Future<Object> future = ask(actor, message, operationTimeout);
 
         try {
@@ -197,8 +200,9 @@ public class ActorContext {
      */
     public Future<Object> executeRemoteOperationAsync(ActorSelection actor, Object message) {
 
-        LOG.debug("Sending remote message {} to {}", message.getClass().toString(), actor.toString());
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Sending remote message {} to {}", message.getClass().toString(), actor.toString());
+        }
         return ask(actor, message, operationTimeout);
     }
 
@@ -213,6 +217,13 @@ public class ActorContext {
         actor.tell(message, ActorRef.noSender());
     }
 
+    public void sendShardOperationAsync(String shardName, Object message) {
+        ActorSelection primary = findPrimary(shardName);
+
+        primary.tell(message, ActorRef.noSender());
+    }
+
+
     /**
      * Execute an operation on the primary for a given shard
      * <p>
@@ -258,6 +269,30 @@ public class ActorContext {
     }
 
 
+    /**
+     * Executes an operation asynchronously on the local shard only.
+     *
+     * <p>
+     *     This method first finds the address of the local shard, if any, and then
+     *     executes the operation on it.
+     * </p>
+     *
+     * @param shardName the name of the shard on which the operation needs to be executed
+     * @param message the message that needs to be sent to the shard
+     * @param timeout the amount of time that this method should wait for a response before timing out
+     * @return null if the shard could not be located, otherwise a future on which the caller can wait
+     */
+    public Future executeLocalShardOperationAsync(String shardName, Object message, Timeout timeout) {
+        ActorRef local = findLocalShard(shardName);
+        if(local == null){
+            return null;
+        }
+        return Patterns.ask(local, message, timeout);
+    }
+
+
+
     public void shutdown() {
         shardManager.tell(PoisonPill.getInstance(), null);
         actorSystem.shutdown();
@@ -295,4 +330,22 @@ public class ActorContext {
         return clusterWrapper.getCurrentMemberName();
     }
 
+    /**
+     * Sends the message to every configured shard.
+     *
+     * @param message the message to broadcast to all shards
+     */
+    public void broadcast(Object message){
+        for(String shardName : configuration.getAllShardNames()){
+            try {
+                sendShardOperationAsync(shardName, message);
+            } catch(Exception e){
+                LOG.warn("broadcast failed to send message " +  message.getClass().getSimpleName() + " to shard " + shardName, e);
+            }
+        }
+    }
+
+    public FiniteDuration getOperationDuration() {
+        return operationDuration;
+    }
 }
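
A sketch of how a caller might use the new asynchronous helpers added to ActorContext above (the shard name and message are illustrative; Timeout is akka.util.Timeout):

    Timeout timeout = new Timeout(actorContext.getOperationDuration());

    // Ask the local replica of the shard, if this member hosts one.
    Future<Object> reply = actorContext.executeLocalShardOperationAsync(
            "inventory", new CloseTransactionChain("chain-1").toSerializable(), timeout);
    if (reply == null) {
        // No local replica of the shard was found on this member.
    }

    // Fire-and-forget the same message to every configured shard.
    actorContext.broadcast(new CloseTransactionChain("chain-1").toSerializable());
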
index c29f93bb073cb712df045bf4839cc247a2b49111..6be6cda5d3d741de221190e55fcf71d042e4c24a 100644 (file)
@@ -1,10 +1,13 @@
 
 odl-cluster-data {
   bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 1000
     mailbox-push-timeout-time = 100ms
   }
+
+  metric-capture-enabled = true
+
   akka {
     loggers = ["akka.event.slf4j.Slf4jLogger"]
     cluster {
index 82bc5e29bc98465624ad181a6e74b06942e9ed1b..e19a76703f69f61ec8df33decddf483cfc6e7192 100644 (file)
@@ -41,13 +41,13 @@ module distributed-datastore-provider {
             range "1..max";
         }
     }
-    
+
     typedef operation-timeout-type {
         type uint16 {
             range "5..max";
         }
     }
-    
+
     grouping data-store-properties {
         leaf max-shard-data-change-executor-queue-size {
             default 1000;
@@ -72,20 +72,32 @@ module distributed-datastore-provider {
             type non-zero-uint16-type;
             description "The maximum queue size for each shard's data store executor.";
          }
-            
+
          leaf shard-transaction-idle-timeout-in-minutes {
             default 10;
             type non-zero-uint16-type;
             description "The maximum amount of time a shard transaction can be idle without receiving any messages before it self-destructs.";
          }
-         
+
          leaf operation-timeout-in-seconds {
             default 5;
             type operation-timeout-type;
             description "The maximum amount of time for akka operations (remote or local) to complete before failing.";
          }
+
+         leaf enable-metric-capture {
+            default false;
+            type boolean;
+            description "Enable or disable metric capture.";
+         }
+
+         leaf bounded-mailbox-capacity {
+             default 1000;
+             type non-zero-uint16-type;
+             description "Max queue size that an actor's mailbox can reach";
+         }
     }
-    
+
     // Augments the 'configuration' choice node under modules/module.
     augment "/config:modules/config:module/config:configuration" {
         case distributed-config-datastore-provider {
index 4c550a768cce258e3a151139f753751281b439d6..022ef9bbafef949921ec24041357cff64013ea12 100644 (file)
@@ -25,12 +25,16 @@ public abstract class AbstractActorTest {
 
         System.setProperty("shard.persistent", "false");
         system = ActorSystem.create("test");
+
+        deletePersistenceFiles();
     }
 
     @AfterClass
     public static void tearDownClass() throws IOException {
         JavaTestKit.shutdownActorSystem(system);
         system = null;
+
+        deletePersistenceFiles();
     }
 
     protected static void deletePersistenceFiles() throws IOException {
index 50367e66ce3b759325d20228a63d0f7c2eeaab5d..7b826302f588ad76be85f54c9e17bcc1c8dff56a 100644 (file)
@@ -14,13 +14,10 @@ import akka.actor.ActorSelection;
 import akka.actor.Props;
 import akka.event.Logging;
 import akka.testkit.JavaTestKit;
-
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChain;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChainReply;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.PreCommitTransactionReply;
@@ -32,7 +29,6 @@ import org.opendaylight.controller.cluster.datastore.messages.WriteDataReply;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
 import scala.concurrent.Await;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
@@ -87,31 +83,8 @@ public class BasicIntegrationTest extends AbstractActorTest {
 
                     assertEquals(true, result);
 
-                    // 1. Create a TransactionChain
-                    shard.tell(new CreateTransactionChain().toSerializable(), getRef());
-
-                    final ActorSelection transactionChain =
-                        new ExpectMsg<ActorSelection>(duration("3 seconds"), "CreateTransactionChainReply") {
-                            @Override
-                            protected ActorSelection match(Object in) {
-                                if (in.getClass().equals(CreateTransactionChainReply.SERIALIZABLE_CLASS)) {
-                                    ActorPath transactionChainPath =
-                                        CreateTransactionChainReply.fromSerializable(getSystem(),in)
-                                            .getTransactionChainPath();
-                                    return getSystem()
-                                        .actorSelection(transactionChainPath);
-                                } else {
-                                    throw noMatch();
-                                }
-                            }
-                        }.get(); // this extracts the received message
-
-                    assertNotNull(transactionChain);
-
-                    System.out.println("Successfully created transaction chain");
-
-                    // 2. Create a Transaction on the TransactionChain
-                    transactionChain.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.WRITE_ONLY.ordinal() ).toSerializable(), getRef());
+                    // Create a transaction on the shard
+                    shard.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.WRITE_ONLY.ordinal() ).toSerializable(), getRef());
 
                     final ActorSelection transaction =
                         new ExpectMsg<ActorSelection>(duration("3 seconds"), "CreateTransactionReply") {
index be43911fe12f26caca64c82b0cfc7ca43d8e505d..04d889fbe0f61eaa822b9b286c9527ad1b1e5447 100644 (file)
@@ -7,9 +7,10 @@ import org.opendaylight.controller.cluster.datastore.modification.MutableComposi
 import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.protobuff.messages.cluster.raft.AppendEntriesMessages;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
 
 import java.io.File;
index 17329611b00d6b010302eaa9d9ecf503972eb7e0..8c253596b8b752c264f81cced7090087fc02879b 100644 (file)
@@ -7,6 +7,7 @@ import org.junit.Test;
 
 import java.io.File;
 import java.util.List;
+import java.util.Set;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -83,4 +84,15 @@ public class ConfigurationImplTest {
         File f = new File("./module-shards.conf");
         ConfigFactory.parseFile(f);
     }
+
+    @Test
+    public void testGetAllShardNames(){
+        Set<String> allShardNames = configuration.getAllShardNames();
+
+        assertEquals(4, allShardNames.size());
+        assertTrue(allShardNames.contains("default"));
+        assertTrue(allShardNames.contains("people-1"));
+        assertTrue(allShardNames.contains("cars-1"));
+        assertTrue(allShardNames.contains("test-1"));
+    }
 }
index 3d0aaa0082e55e8b73976af4637977dd9111c97d..ab3ff795d3cb4a4e66e3ddd708236a8d15eec365 100644 (file)
@@ -17,6 +17,10 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
 import java.util.List;
 
+import static junit.framework.TestCase.assertEquals;
+import static junit.framework.TestCase.assertNotNull;
+import static junit.framework.TestCase.assertTrue;
+
 public class DataChangeListenerRegistrationProxyTest extends AbstractActorTest{
 
     private ActorRef dataChangeListenerActor = getSystem().actorOf(Props.create(DoNothingActor.class));
@@ -64,14 +68,41 @@ public class DataChangeListenerRegistrationProxyTest extends AbstractActorTest{
         Object messages = testContext
             .executeLocalOperation(actorRef, "messages");
 
-        Assert.assertNotNull(messages);
+        assertNotNull(messages);
 
-        Assert.assertTrue(messages instanceof List);
+        assertTrue(messages instanceof List);
 
         List<Object> listMessages = (List<Object>) messages;
 
-        Assert.assertEquals(1, listMessages.size());
+        assertEquals(1, listMessages.size());
+
+        assertTrue(listMessages.get(0).getClass()
+            .equals(CloseDataChangeListenerRegistration.SERIALIZABLE_CLASS));
+    }
+
+    @Test
+    public void testCloseWhenRegistrationIsNull() throws Exception {
+        final Props props = Props.create(MessageCollectorActor.class);
+        final ActorRef actorRef = getSystem().actorOf(props);
+
+        DataChangeListenerRegistrationProxy proxy =
+            new DataChangeListenerRegistrationProxy(
+                new MockDataChangeListener(), dataChangeListenerActor);
+
+        proxy.close();
+
+        //Check if it was received by the remote actor
+        ActorContext
+            testContext = new ActorContext(getSystem(), getSystem().actorOf(Props.create(DoNothingActor.class)),new MockClusterWrapper(), new MockConfiguration());
+        Object messages = testContext
+            .executeLocalOperation(actorRef, "messages");
+
+        assertNotNull(messages);
+
+        assertTrue(messages instanceof List);
+
+        List<Object> listMessages = (List<Object>) messages;
 
-        Assert.assertTrue(listMessages.get(0).getClass().equals(CloseDataChangeListenerRegistration.SERIALIZABLE_CLASS));
+        assertEquals(0, listMessages.size());
     }
 }
index 8a7b50d20c9003682b0ec6a2626b1c342ea73a03..ec8aee2b09d4dfaf4ed4b5b732a9168783beb73c 100644 (file)
@@ -21,6 +21,7 @@ import org.opendaylight.controller.md.cluster.datastore.model.SchemaContextHelpe
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
 
@@ -144,6 +145,94 @@ public class DistributedDataStoreIntegrationTest {
 
     }
 
+    @Test
+    public void transactionChainIntegrationTest() throws Exception {
+        final Configuration configuration = new ConfigurationImpl("module-shards.conf", "modules.conf");
+        ShardStrategyFactory.setConfiguration(configuration);
+
+
+
+        new JavaTestKit(getSystem()) {
+            {
+
+                new Within(duration("10 seconds")) {
+                    @Override
+                    protected void run() {
+                        try {
+                            final DistributedDataStore distributedDataStore =
+                                new DistributedDataStore(getSystem(), "config",
+                                    new MockClusterWrapper(), configuration,
+                                    new DatastoreContext());
+
+                            distributedDataStore.onGlobalContextUpdated(TestModel.createTestContext());
+
+                            // Wait for a specific log message to show up
+                            final boolean result =
+                                new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
+                                ) {
+                                    @Override
+                                    protected Boolean run() {
+                                        return true;
+                                    }
+                                }.from("akka://test/user/shardmanager-config/member-1-shard-test-1-config")
+                                    .message("Switching from state Candidate to Leader")
+                                    .occurrences(1).exec();
+
+                            assertEquals(true, result);
+
+                            DOMStoreTransactionChain transactionChain =
+                                distributedDataStore.createTransactionChain();
+
+                            DOMStoreReadWriteTransaction transaction =
+                                transactionChain.newReadWriteTransaction();
+
+                            transaction
+                                .write(TestModel.TEST_PATH, ImmutableNodes
+                                    .containerNode(TestModel.TEST_QNAME));
+
+                            ListenableFuture<Optional<NormalizedNode<?, ?>>>
+                                future =
+                                transaction.read(TestModel.TEST_PATH);
+
+                            Optional<NormalizedNode<?, ?>> optional =
+                                future.get();
+
+                            Assert.assertTrue("Node not found", optional.isPresent());
+
+                            NormalizedNode<?, ?> normalizedNode =
+                                optional.get();
+
+                            assertEquals(TestModel.TEST_QNAME,
+                                normalizedNode.getNodeType());
+
+                            DOMStoreThreePhaseCommitCohort ready =
+                                transaction.ready();
+
+                            ListenableFuture<Boolean> canCommit =
+                                ready.canCommit();
+
+                            assertTrue(canCommit.get(5, TimeUnit.SECONDS));
+
+                            ListenableFuture<Void> preCommit =
+                                ready.preCommit();
+
+                            preCommit.get(5, TimeUnit.SECONDS);
+
+                            ListenableFuture<Void> commit = ready.commit();
+
+                            commit.get(5, TimeUnit.SECONDS);
+
+                            transactionChain.close();
+                        } catch (ExecutionException | TimeoutException | InterruptedException e){
+                            fail(e.getMessage());
+                        }
+                    }
+                };
+            }
+        };
+
+    }
+
 
     //FIXME : Disabling test because it's flaky
     //@Test
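
Distilled from the new transactionChainIntegrationTest above, the client-side sequence for a chained write reduces to roughly the following (test scaffolding, assertions and error handling omitted; the data store is assumed to be initialized):

    DOMStoreTransactionChain chain = distributedDataStore.createTransactionChain();
    DOMStoreReadWriteTransaction tx = chain.newReadWriteTransaction();

    tx.write(TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));

    DOMStoreThreePhaseCommitCohort cohort = tx.ready();
    cohort.canCommit().get(5, TimeUnit.SECONDS);
    cohort.preCommit().get(5, TimeUnit.SECONDS);
    cohort.commit().get(5, TimeUnit.SECONDS);

    chain.close();
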
index aeb47de888564f90830dc26a05c1ead1e66a78c1..08c3ea9602adb9cd891f9e1fe573ded671e5d6d7 100644 (file)
@@ -1,14 +1,20 @@
 package org.opendaylight.controller.cluster.datastore;
 
+import akka.actor.ActorPath;
 import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
 import akka.actor.Props;
-
+import akka.dispatch.ExecutionContexts;
+import akka.dispatch.Futures;
+import akka.util.Timeout;
+import com.google.common.util.concurrent.MoreExecutors;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
 import org.opendaylight.controller.cluster.datastore.utils.MockActorContext;
 import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
@@ -24,13 +30,23 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import scala.concurrent.ExecutionContextExecutor;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.concurrent.TimeUnit;
 
+import static junit.framework.TestCase.assertEquals;
+import static junit.framework.TestCase.assertNull;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyString;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 public class DistributedDataStoreTest extends AbstractActorTest{
 
@@ -95,20 +111,108 @@ public class DistributedDataStoreTest extends AbstractActorTest{
 
     @Test
     public void testRegisterChangeListenerWhenShardIsLocal() throws Exception {
+        ActorContext actorContext = mock(ActorContext.class);
+
+        distributedDataStore = new DistributedDataStore(actorContext);
+        distributedDataStore.onGlobalContextUpdated(TestModel.createTestContext());
 
-        mockActorContext.setExecuteLocalShardOperationResponse(new RegisterChangeListenerReply(doNothingActorRef.path()));
+        Future future = mock(Future.class);
+        when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS));
+        when(actorContext.getActorSystem()).thenReturn(getSystem());
+        when(actorContext
+            .executeLocalShardOperationAsync(anyString(), anyObject(), any(Timeout.class))).thenReturn(future);
 
         ListenerRegistration registration =
-            distributedDataStore.registerChangeListener(TestModel.TEST_PATH, new AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>() {
-                @Override
-                public void onDataChanged(AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change) {
-                    throw new UnsupportedOperationException("onDataChanged");
-                }
-            }, AsyncDataBroker.DataChangeScope.BASE);
+            distributedDataStore.registerChangeListener(TestModel.TEST_PATH,
+                mock(AsyncDataChangeListener.class),
+                AsyncDataBroker.DataChangeScope.BASE);
 
-        assertTrue(registration instanceof DataChangeListenerRegistrationProxy);
+        assertNotNull(registration);
+
+        assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass());
+    }
+
+    @Test
+    public void testRegisterChangeListenerWhenSuccessfulReplyReceived() throws Exception {
+        ActorContext actorContext = mock(ActorContext.class);
+
+        distributedDataStore = new DistributedDataStore(actorContext);
+        distributedDataStore.onGlobalContextUpdated(
+            TestModel.createTestContext());
+
+        ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.sameThreadExecutor());
+
+        // Make Future successful
+        Future f = Futures.successful(new RegisterChangeListenerReply(doNothingActorRef.path()));
+
+        // Setup the mocks
+        ActorSystem actorSystem = mock(ActorSystem.class);
+        ActorSelection actorSelection = mock(ActorSelection.class);
+
+        when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS));
+        when(actorSystem.dispatcher()).thenReturn(executor);
+        when(actorSystem.actorOf(any(Props.class))).thenReturn(doNothingActorRef);
+        when(actorContext.getActorSystem()).thenReturn(actorSystem);
+        when(actorContext
+            .executeLocalShardOperationAsync(anyString(), anyObject(), any(Timeout.class))).thenReturn(f);
+        when(actorContext.actorSelection(any(ActorPath.class))).thenReturn(actorSelection);
+
+        ListenerRegistration registration =
+            distributedDataStore.registerChangeListener(TestModel.TEST_PATH,
+                mock(AsyncDataChangeListener.class),
+                AsyncDataBroker.DataChangeScope.BASE);
 
         assertNotNull(registration);
+
+        assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass());
+
+        ActorSelection listenerRegistrationActor =
+            ((DataChangeListenerRegistrationProxy) registration).getListenerRegistrationActor();
+
+        assertNotNull(listenerRegistrationActor);
+
+        assertEquals(actorSelection, listenerRegistrationActor);
+    }
+
+    @Test
+    public void testRegisterChangeListenerWhenSuccessfulReplyFailed() throws Exception {
+        ActorContext actorContext = mock(ActorContext.class);
+
+        distributedDataStore = new DistributedDataStore(actorContext);
+        distributedDataStore.onGlobalContextUpdated(
+            TestModel.createTestContext());
+
+        ExecutionContextExecutor executor = ExecutionContexts.fromExecutor(MoreExecutors.sameThreadExecutor());
+
+        // Make Future fail
+        Future f = Futures.failed(new IllegalArgumentException());
+
+        // Setup the mocks
+        ActorSystem actorSystem = mock(ActorSystem.class);
+        ActorSelection actorSelection = mock(ActorSelection.class);
+
+        when(actorContext.getOperationDuration()).thenReturn(FiniteDuration.apply(5, TimeUnit.SECONDS));
+        when(actorSystem.dispatcher()).thenReturn(executor);
+        when(actorSystem.actorOf(any(Props.class))).thenReturn(doNothingActorRef);
+        when(actorContext.getActorSystem()).thenReturn(actorSystem);
+        when(actorContext
+            .executeLocalShardOperationAsync(anyString(), anyObject(), any(Timeout.class))).thenReturn(f);
+        when(actorContext.actorSelection(any(ActorPath.class))).thenReturn(actorSelection);
+
+        ListenerRegistration registration =
+            distributedDataStore.registerChangeListener(TestModel.TEST_PATH,
+                mock(AsyncDataChangeListener.class),
+                AsyncDataBroker.DataChangeScope.BASE);
+
+        assertNotNull(registration);
+
+        assertEquals(DataChangeListenerRegistrationProxy.class, registration.getClass());
+
+        ActorSelection listenerRegistrationActor =
+            ((DataChangeListenerRegistrationProxy) registration).getListenerRegistrationActor();
+
+        assertNull(listenerRegistrationActor);
+
     }
 
 
index 766dcb72681d3bc5ea945e0232fa25c32f1e76d5..deb71c2df4aa9cc522904e4014ed66d536aa2fa4 100644 (file)
@@ -14,8 +14,6 @@ import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChain;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChainReply;
 import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
 import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
@@ -53,65 +51,6 @@ public class ShardTest extends AbstractActorTest {
 
     private static final DatastoreContext DATA_STORE_CONTEXT = new DatastoreContext();
 
-    @Test
-    public void testOnReceiveCreateTransactionChain() throws Exception {
-        new JavaTestKit(getSystem()) {{
-            final ShardIdentifier identifier =
-                ShardIdentifier.builder().memberName("member-1")
-                    .shardName("inventory").type("config").build();
-
-            final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
-            final ActorRef subject =
-                getSystem().actorOf(props, "testCreateTransactionChain");
-
-
-            // Wait for a specific log message to show up
-            final boolean result =
-                new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
-                ) {
-                    @Override
-                    protected Boolean run() {
-                        return true;
-                    }
-                }.from(subject.path().toString())
-                    .message("Switching from state Candidate to Leader")
-                    .occurrences(1).exec();
-
-            Assert.assertEquals(true, result);
-
-            new Within(duration("3 seconds")) {
-                @Override
-                protected void run() {
-
-                    subject.tell(new CreateTransactionChain().toSerializable(), getRef());
-
-                    final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
-                        // do not put code outside this method, will run afterwards
-                        @Override
-                        protected String match(Object in) {
-                            if (in.getClass().equals(CreateTransactionChainReply.SERIALIZABLE_CLASS)){
-                                CreateTransactionChainReply reply =
-                                    CreateTransactionChainReply.fromSerializable(getSystem(),in);
-                                return reply.getTransactionChainPath()
-                                    .toString();
-                            } else {
-                                throw noMatch();
-                            }
-                        }
-                    }.get(); // this extracts the received message
-
-                    assertEquals("Unexpected transaction path " + out,
-                        "akka://test/user/testCreateTransactionChain/$a",
-                        out);
-
-                    expectNoMsg();
-                }
-
-
-            };
-        }};
-    }
-
     @Test
     public void testOnReceiveRegisterListener() throws Exception {
         new JavaTestKit(getSystem()) {{
@@ -233,6 +172,65 @@ public class ShardTest extends AbstractActorTest {
         }};
     }
 
+    @Test
+    public void testCreateTransactionOnChain(){
+        new JavaTestKit(getSystem()) {{
+            final ShardIdentifier identifier =
+                ShardIdentifier.builder().memberName("member-1")
+                    .shardName("inventory").type("config").build();
+
+            final Props props = Shard.props(identifier, Collections.EMPTY_MAP, DATA_STORE_CONTEXT, TestModel.createTestContext());
+            final ActorRef subject =
+                getSystem().actorOf(props, "testCreateTransactionOnChain");
+
+            // Wait for a specific log message to show up
+            final boolean result =
+                new JavaTestKit.EventFilter<Boolean>(Logging.Info.class
+                ) {
+                    @Override
+                    protected Boolean run() {
+                        return true;
+                    }
+                }.from(subject.path().toString())
+                    .message("Switching from state Candidate to Leader")
+                    .occurrences(1).exec();
+
+            Assert.assertEquals(true, result);
+
+            new Within(duration("3 seconds")) {
+                @Override
+                protected void run() {
+
+                    subject.tell(
+                        new UpdateSchemaContext(TestModel.createTestContext()),
+                        getRef());
+
+                    subject.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.READ_ONLY.ordinal() , "foobar").toSerializable(),
+                        getRef());
+
+                    final String out = new ExpectMsg<String>(duration("3 seconds"), "match hint") {
+                        // do not put code outside this method, will run afterwards
+                        @Override
+                        protected String match(Object in) {
+                            if (in instanceof CreateTransactionReply) {
+                                CreateTransactionReply reply =
+                                    (CreateTransactionReply) in;
+                                return reply.getTransactionActorPath()
+                                    .toString();
+                            } else {
+                                throw noMatch();
+                            }
+                        }
+                    }.get(); // this extracts the received message
+
+                    assertTrue("Unexpected transaction path " + out,
+                        out.contains("akka://test/user/testCreateTransactionOnChain/shard-txn-1"));
+                    expectNoMsg();
+                }
+            };
+        }};
+    }
+
     @Test
     public void testPeerAddressResolved(){
         new JavaTestKit(getSystem()) {{
@@ -345,11 +343,16 @@ public class ShardTest extends AbstractActorTest {
                     subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
                         getRef());
 
-                    waitForLogMessage(Logging.Debug.class, subject, "CaptureSnapshotReply received by actor");
+                    waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+
+                    subject.tell(new CaptureSnapshot(-1,-1,-1,-1),
+                        getRef());
+
+                    waitForLogMessage(Logging.Info.class, subject, "CaptureSnapshotReply received by actor");
+
                 }
             };
 
-            Thread.sleep(2000);
             deletePersistenceFiles();
         }};
     }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionChainTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionChainTest.java
deleted file mode 100644 (file)
index c5968c3..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-package org.opendaylight.controller.cluster.datastore;
-
-import akka.actor.ActorRef;
-import akka.actor.Props;
-import akka.testkit.JavaTestKit;
-
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChain;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionChainReply;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
-import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-
-import static org.junit.Assert.assertEquals;
-
-public class ShardTransactionChainTest extends AbstractActorTest {
-
-    private static ListeningExecutorService storeExecutor = MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());
-
-    private static final InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", storeExecutor,
-            MoreExecutors.sameThreadExecutor());
-
-    private static final SchemaContext testSchemaContext = TestModel.createTestContext();
-
-    private static final DatastoreContext DATA_STORE_CONTEXT = new DatastoreContext();
-
-    private static final String mockShardName = "mockShardName";
-
-    private final ShardStats shardStats = new ShardStats(mockShardName, "DataStore");
-
-    @BeforeClass
-    public static void staticSetup() {
-        store.onGlobalContextUpdated(testSchemaContext);
-    }
-
-    @Test
-    public void testOnReceiveCreateTransaction() throws Exception {
-        new JavaTestKit(getSystem()) {{
-            final Props props = ShardTransactionChain.props(store.createTransactionChain(),
-                    testSchemaContext, DATA_STORE_CONTEXT, shardStats);
-            final ActorRef subject = getSystem().actorOf(props, "testCreateTransaction");
-
-            new Within(duration("1 seconds")) {
-                @Override
-                protected void run() {
-
-                    subject.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.READ_ONLY.ordinal() ).toSerializable(), getRef());
-
-                    final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
-                        // do not put code outside this method, will run afterwards
-                        @Override
-                        protected String match(Object in) {
-                            if (in.getClass().equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
-                                return CreateTransactionReply.fromSerializable(in).getTransactionPath();
-                            }else{
-                                throw noMatch();
-                            }
-                        }
-                    }.get(); // this extracts the received message
-
-                    assertEquals("Unexpected transaction path " + out,
-                            "akka://test/user/testCreateTransaction/shard-txn-1",
-                            out);
-
-                    // Will wait for the rest of the 3 seconds
-                    expectNoMsg();
-                }
-
-
-            };
-        }};
-    }
-
-    @Test
-    public void testOnReceiveCloseTransactionChain() throws Exception {
-        new JavaTestKit(getSystem()) {{
-            final Props props = ShardTransactionChain.props(store.createTransactionChain(),
-                    testSchemaContext, DATA_STORE_CONTEXT, shardStats );
-            final ActorRef subject = getSystem().actorOf(props, "testCloseTransactionChain");
-
-            new Within(duration("1 seconds")) {
-                @Override
-                protected void run() {
-
-                    subject.tell(new CloseTransactionChain().toSerializable(), getRef());
-
-                    final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
-                        // do not put code outside this method, will run afterwards
-                        @Override
-                        protected String match(Object in) {
-                            if (in.getClass().equals(CloseTransactionChainReply.SERIALIZABLE_CLASS)) {
-                                return "match";
-                            } else {
-                                throw noMatch();
-                            }
-                        }
-                    }.get(); // this extracts the received message
-
-                    assertEquals("match", out);
-                    // Will wait for the rest of the 3 seconds
-                    expectNoMsg();
-                }
-
-
-            };
-        }};
-    }
-}
index 93145bdd6d86360070dce5102dbd968c6ebc0629..4cca1bf9ad6698e9daa32409ced7055836cee351 100644 (file)
 
 package org.opendaylight.controller.cluster.datastore;
 
-import static org.mockito.Mockito.doReturn;
-
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
@@ -23,9 +20,15 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
 public class TransactionChainProxyTest {
-    ActorContext actorContext = Mockito.mock(ActorContext.class);
-    SchemaContext schemaContext = Mockito.mock(SchemaContext.class);
+    ActorContext actorContext = mock(ActorContext.class);
+    SchemaContext schemaContext = mock(SchemaContext.class);
 
     @Before
     public void setUp() {
@@ -57,8 +60,12 @@ public class TransactionChainProxyTest {
 
     }
 
-    @Test(expected=UnsupportedOperationException.class)
+    @Test
     public void testClose() throws Exception {
-        new TransactionChainProxy(actorContext).close();
+        ActorContext context = mock(ActorContext.class);
+
+        new TransactionChainProxy(context).close();
+
+        verify(context, times(1)).broadcast(anyObject());
     }
 }
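
The reworked testClose() above relies on Mockito's verify(..., times(1)) to assert that close() broadcasts a message. A minimal sketch of that verification idiom, using hypothetical Broadcaster/Notifier types instead of the real ActorContext, might look like:

    import static org.mockito.Matchers.anyObject;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.times;
    import static org.mockito.Mockito.verify;

    public class VerifySketch {
        interface Broadcaster {
            void broadcast(Object message);
        }

        static class Notifier {
            private final Broadcaster broadcaster;
            Notifier(Broadcaster broadcaster) { this.broadcaster = broadcaster; }
            void close() { broadcaster.broadcast("closed"); }   // side effect under test
        }

        public static void main(String[] args) {
            Broadcaster broadcaster = mock(Broadcaster.class);
            new Notifier(broadcaster).close();
            // Fails if broadcast() was never called, or called more than once
            verify(broadcaster, times(1)).broadcast(anyObject());
        }
    }
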
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/InMemorySnapshotStore.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/utils/InMemorySnapshotStore.java
new file mode 100644 (file)
index 0000000..0e492f0
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.utils;
+
+import akka.dispatch.Futures;
+import akka.japi.Option;
+import akka.persistence.SelectedSnapshot;
+import akka.persistence.SnapshotMetadata;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.snapshot.japi.SnapshotStore;
+import com.google.common.collect.Iterables;
+import scala.concurrent.Future;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class InMemorySnapshotStore extends SnapshotStore {
+
+    Map<String, List<Snapshot>> snapshots = new HashMap<>();
+
+    @Override public Future<Option<SelectedSnapshot>> doLoadAsync(String s,
+        SnapshotSelectionCriteria snapshotSelectionCriteria) {
+        List<Snapshot> snapshotList = snapshots.get(s);
+        if(snapshotList == null){
+            return Futures.successful(Option.<SelectedSnapshot>none());
+        }
+
+        Snapshot snapshot = Iterables.getLast(snapshotList);
+        SelectedSnapshot selectedSnapshot =
+            new SelectedSnapshot(snapshot.getMetadata(), snapshot.getData());
+        return Futures.successful(Option.some(selectedSnapshot));
+    }
+
+    @Override public Future<Void> doSaveAsync(SnapshotMetadata snapshotMetadata, Object o) {
+        List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+
+        if(snapshotList == null){
+            snapshotList = new ArrayList<>();
+            snapshots.put(snapshotMetadata.persistenceId(), snapshotList);
+        }
+        snapshotList.add(new Snapshot(snapshotMetadata, o));
+
+        return Futures.successful(null);
+    }
+
+    @Override public void onSaved(SnapshotMetadata snapshotMetadata) throws Exception {
+    }
+
+    @Override public void doDelete(SnapshotMetadata snapshotMetadata) throws Exception {
+        List<Snapshot> snapshotList = snapshots.get(snapshotMetadata.persistenceId());
+
+        if(snapshotList == null){
+            return;
+        }
+
+        int deleteIndex = -1;
+
+        for(int i=0;i<snapshotList.size(); i++){
+            Snapshot snapshot = snapshotList.get(i);
+            if(snapshotMetadata.equals(snapshot.getMetadata())){
+                deleteIndex = i;
+                break;
+            }
+        }
+
+        if(deleteIndex != -1){
+            snapshotList.remove(deleteIndex);
+        }
+
+    }
+
+    @Override public void doDelete(String s, SnapshotSelectionCriteria snapshotSelectionCriteria)
+        throws Exception {
+        List<Snapshot> snapshotList = snapshots.get(s);
+
+        if(snapshotList == null){
+            return;
+        }
+
+        // TODO : This is a quick and dirty implementation. Do actual match later.
+        snapshotList.clear();
+        snapshots.remove(s);
+    }
+
+    private static class Snapshot {
+        private final SnapshotMetadata metadata;
+        private final Object data;
+
+        private Snapshot(SnapshotMetadata metadata, Object data) {
+            this.metadata = metadata;
+            this.data = data;
+        }
+
+        public SnapshotMetadata getMetadata() {
+            return metadata;
+        }
+
+        public Object getData() {
+            return data;
+        }
+    }
+}
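
InMemorySnapshotStore above keeps an append-only list of snapshots per persistence id and serves the most recent entry on load. A stripped-down sketch of that bookkeeping, with hypothetical String payloads in place of Akka's SnapshotMetadata and snapshot data, could be:

    import com.google.common.collect.Iterables;
    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class LatestSnapshotSketch {
        // Same shape as InMemorySnapshotStore: every save appends,
        // every load returns the most recent entry for the persistence id.
        private final Map<String, List<String>> snapshots = new HashMap<>();

        void save(String persistenceId, String data) {
            List<String> list = snapshots.get(persistenceId);
            if (list == null) {
                list = new ArrayList<>();
                snapshots.put(persistenceId, list);
            }
            list.add(data);
        }

        String loadLatest(String persistenceId) {
            List<String> list = snapshots.get(persistenceId);
            return list == null ? null : Iterables.getLast(list);
        }

        public static void main(String[] args) {
            LatestSnapshotSketch store = new LatestSnapshotSketch();
            store.save("shard-1", "snapshot-A");
            store.save("shard-1", "snapshot-B");
            System.out.println(store.loadLatest("shard-1"));   // snapshot-B
            System.out.println(store.loadLatest("shard-2"));   // null (no snapshots yet)
        }
    }
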
index 8d49c6fac32bb85213a5c7a5906352e62bc41e58..06c5767bd030c1b5872969025902566a7cc3e27c 100644 (file)
@@ -16,6 +16,7 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 public class MockConfiguration implements Configuration{
     @Override public List<String> getMemberShardNames(String memberName) {
@@ -46,4 +47,8 @@ public class MockConfiguration implements Configuration{
 
         return Collections.EMPTY_LIST;
     }
+
+    @Override public Set<String> getAllShardNames() {
+        return Collections.emptySet();
+    }
 }
index 2671be80bb93738b34ea0832a04fea3dfcf4c9db..a2b78c6c152dd350b7a4f8bffc075af0ea97cd92 100644 (file)
@@ -13,7 +13,7 @@ import akka.actor.ActorSystem;
 import akka.actor.Props;
 import akka.actor.UntypedActor;
 import com.typesafe.config.ConfigFactory;
-import org.opendaylight.controller.cluster.datastore.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
 import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
 import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
 import org.opendaylight.controller.cluster.example.messages.KeyValue;
index 0e6d5353014efb094e2536a177f8c521a008bf66..e6bdf5aac379571ba0b130c1b3076e9c25f75299 100644 (file)
@@ -12,7 +12,7 @@ import akka.actor.ActorSystem;
 import akka.actor.Props;
 import akka.actor.UntypedActor;
 import com.typesafe.config.ConfigFactory;
-import org.opendaylight.controller.cluster.datastore.CompositeModificationPayload;
+import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
 import org.opendaylight.controller.cluster.example.messages.KeyValue;
 import org.opendaylight.controller.cluster.raft.messages.AppendEntries;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
index 6851b1b72ce39d4c011cb0771b95014bb31ade32..f0dadc618b2b4769b0240ff1c55567b28c924fb9 100644 (file)
@@ -1,4 +1,6 @@
 akka {
+    persistence.snapshot-store.plugin = "in-memory-snapshot-store"
+
     loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
 
     actor {
@@ -14,8 +16,16 @@ akka {
         }
     }
 }
+
+in-memory-snapshot-store {
+  # Class name of the plugin.
+  class = "org.opendaylight.controller.cluster.datastore.utils.InMemorySnapshotStore"
+  # Dispatcher for the plugin actor.
+  plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
+}
+
 bounded-mailbox {
-  mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+  mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
   mailbox-capacity = 1000
   mailbox-push-timeout-time = 100ms
 }
index b423bbd0e5c644c147a69398a65da0a564c985a1..ac62974d290e5cb37744e39b085952a57aa28543 100644 (file)
@@ -7,10 +7,12 @@
  */
 package org.opendaylight.controller.config.yang.md.sal.dom.impl;
 
+import java.util.EnumMap;
+import java.util.Map;
 import java.util.concurrent.ExecutorService;
-
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
 import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataBrokerImpl;
 import org.opendaylight.controller.md.sal.dom.broker.impl.jmx.CommitStatsMXBeanImpl;
@@ -18,7 +20,6 @@ import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFac
 import org.opendaylight.controller.sal.core.spi.data.DOMStore;
 import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
 import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
-import com.google.common.collect.ImmutableMap;
 
 /**
 *
@@ -59,9 +60,10 @@ public final class DomInmemoryDataBrokerModule extends
            //we will default to InMemoryDOMDataStore creation
            configStore = InMemoryDOMDataStoreFactory.create("DOM-CFG", getSchemaServiceDependency());
         }
-        ImmutableMap<LogicalDatastoreType, DOMStore> datastores = ImmutableMap
-                .<LogicalDatastoreType, DOMStore> builder().put(LogicalDatastoreType.OPERATIONAL, operStore)
-                .put(LogicalDatastoreType.CONFIGURATION, configStore).build();
+
+        final Map<LogicalDatastoreType, DOMStore> datastores = new EnumMap<>(LogicalDatastoreType.class);
+        datastores.put(LogicalDatastoreType.OPERATIONAL, operStore);
+        datastores.put(LogicalDatastoreType.CONFIGURATION, configStore);
 
         /*
          * We use a single-threaded executor for commits with a bounded queue capacity. If the
@@ -88,29 +90,30 @@ public final class DomInmemoryDataBrokerModule extends
 
         DOMDataBrokerImpl newDataBroker = new DOMDataBrokerImpl(datastores,
                 new DeadlockDetectingListeningExecutorService(commitExecutor,
-                    TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION,
+                    TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER,
                     listenableFutureExecutor));
 
         final CommitStatsMXBeanImpl commitStatsMXBean = new CommitStatsMXBeanImpl(
                 newDataBroker.getCommitStatsTracker(), JMX_BEAN_TYPE);
         commitStatsMXBean.registerMBean();
 
-        final ThreadExecutorStatsMXBeanImpl commitExecutorStatsMXBean =
-                new ThreadExecutorStatsMXBeanImpl(commitExecutor, "CommitExecutorStats",
+        final AbstractMXBean commitExecutorStatsMXBean =
+                ThreadExecutorStatsMXBeanImpl.create(commitExecutor, "CommitExecutorStats",
                         JMX_BEAN_TYPE, null);
-        commitExecutorStatsMXBean.registerMBean();
-
-        final ThreadExecutorStatsMXBeanImpl commitFutureStatsMXBean =
-                new ThreadExecutorStatsMXBeanImpl(listenableFutureExecutor,
+        final AbstractMXBean commitFutureStatsMXBean =
+                ThreadExecutorStatsMXBeanImpl.create(listenableFutureExecutor,
                         "CommitFutureExecutorStats", JMX_BEAN_TYPE, null);
-        commitFutureStatsMXBean.registerMBean();
 
         newDataBroker.setCloseable(new AutoCloseable() {
             @Override
             public void close() {
                 commitStatsMXBean.unregisterMBean();
-                commitExecutorStatsMXBean.unregisterMBean();
-                commitFutureStatsMXBean.unregisterMBean();
+                if (commitExecutorStatsMXBean != null) {
+                    commitExecutorStatsMXBean.unregisterMBean();
+                }
+                if (commitFutureStatsMXBean != null) {
+                    commitFutureStatsMXBean.unregisterMBean();
+                }
             }
         });
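
The module now collects its datastores in an EnumMap keyed by LogicalDatastoreType instead of a Guava ImmutableMap. A small sketch of why EnumMap is a natural fit for enum-keyed maps, with a hypothetical StoreType enum standing in for LogicalDatastoreType:

    import java.util.EnumMap;
    import java.util.Map;

    public class EnumMapSketch {
        // Hypothetical stand-in for LogicalDatastoreType
        enum StoreType { OPERATIONAL, CONFIGURATION }

        public static void main(String[] args) {
            // EnumMap stores values in an array indexed by ordinal: no hashing,
            // iteration in enum declaration order, and no extra Guava dependency.
            Map<StoreType, String> datastores = new EnumMap<>(StoreType.class);
            datastores.put(StoreType.OPERATIONAL, "operStore");
            datastores.put(StoreType.CONFIGURATION, "configStore");
            System.out.println(datastores);   // {OPERATIONAL=operStore, CONFIGURATION=configStore}
        }
    }
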
 
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/HashMapDataStoreModule.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/HashMapDataStoreModule.java
deleted file mode 100644 (file)
index df1b5a3..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.md.sal.dom.impl;
-
-import org.opendaylight.controller.sal.dom.broker.impl.HashMapDataStore;
-
-/**
-*
-*/
-public final class HashMapDataStoreModule extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractHashMapDataStoreModule
-{
-
-    public HashMapDataStoreModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
-        super(identifier, dependencyResolver);
-    }
-
-    public HashMapDataStoreModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, HashMapDataStoreModule oldModule, java.lang.AutoCloseable oldInstance) {
-        super(identifier, dependencyResolver, oldModule, oldInstance);
-    }
-
-    @Override
-    public void validate(){
-        super.validate();
-        // Add custom validation for module attributes here.
-    }
-
-    @Override
-    public java.lang.AutoCloseable createInstance() {
-        HashMapDataStore store = new HashMapDataStore();
-        return store;
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/HashMapDataStoreModuleFactory.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/HashMapDataStoreModuleFactory.java
deleted file mode 100644 (file)
index 6b5503f..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.config.yang.md.sal.dom.impl;
-
-/**
-*
-*/
-public class HashMapDataStoreModuleFactory extends org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractHashMapDataStoreModuleFactory
-{
-
-
-}
index d3791a08782b6cbdf0217b85834af12bbf1b9fab..15d53f53103c6d4aef8aa3933d6980ae09f75d04 100644 (file)
@@ -6,14 +6,14 @@
  */
 package org.opendaylight.controller.md.sal.dom.broker.impl;
 
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import java.util.Map;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-
 /**
  * Composite DOM Transaction backed by {@link DOMStoreTransaction}.
  *
@@ -29,7 +29,7 @@ import com.google.common.collect.ImmutableMap;
 abstract class AbstractDOMForwardedCompositeTransaction<K, T extends DOMStoreTransaction> implements
         AsyncTransaction<YangInstanceIdentifier, NormalizedNode<?, ?>> {
 
-    private final ImmutableMap<K, T> backingTxs;
+    private final Map<K, T> backingTxs;
     private final Object identifier;
 
     /**
@@ -41,7 +41,7 @@ abstract class AbstractDOMForwardedCompositeTransaction<K, T extends DOMStoreTra
      * @param backingTxs
      *            Key,value map of backing transactions.
      */
-    protected AbstractDOMForwardedCompositeTransaction(final Object identifier, final ImmutableMap<K, T> backingTxs) {
+    protected AbstractDOMForwardedCompositeTransaction(final Object identifier, final Map<K, T> backingTxs) {
         this.identifier = Preconditions.checkNotNull(identifier, "Identifier should not be null");
         this.backingTxs = Preconditions.checkNotNull(backingTxs, "Backing transactions should not be null");
     }
@@ -58,15 +58,17 @@ abstract class AbstractDOMForwardedCompositeTransaction<K, T extends DOMStoreTra
      */
     protected final T getSubtransaction(final K key) {
         Preconditions.checkNotNull(key, "key must not be null.");
-        Preconditions.checkArgument(backingTxs.containsKey(key), "No subtransaction associated with %s", key);
-        return backingTxs.get(key);
+
+        final T ret = backingTxs.get(key);
+        Preconditions.checkArgument(ret != null, "No subtransaction associated with %s", key);
+        return ret;
     }
 
     /**
      * Returns immutable Iterable of all subtransactions.
      *
      */
-    protected Iterable<T> getSubtransactions() {
+    protected Collection<T> getSubtransactions() {
         return backingTxs.values();
     }
 
@@ -77,9 +79,8 @@ abstract class AbstractDOMForwardedCompositeTransaction<K, T extends DOMStoreTra
 
     protected void closeSubtransactions() {
         /*
-         *  We share one exception for all failures, which are added
-         *  as supressedExceptions to it.
-         *
+         * We share one exception for all failures, which are added
+         * as suppressed exceptions to it.
          */
         IllegalStateException failure = null;
         for (T subtransaction : backingTxs.values()) {
@@ -87,17 +88,17 @@ abstract class AbstractDOMForwardedCompositeTransaction<K, T extends DOMStoreTra
                 subtransaction.close();
             } catch (Exception e) {
                 // If we did not allocated failure we allocate it
-                if(failure == null) {
-                    failure = new IllegalStateException("Uncaught exception occured during closing transaction.", e);
+                if (failure == null) {
+                    failure = new IllegalStateException("Uncaught exception occurred during closing transaction", e);
                 } else {
-                    // We update it with addotional exceptions, which occured during error.
+                    // We update it with additional exceptions, which occurred during error.
                     failure.addSuppressed(e);
                 }
             }
         }
         // If we have failure, we throw it at after all attempts to close.
-        if(failure != null) {
+        if (failure != null) {
             throw failure;
         }
     }
-}
\ No newline at end of file
+}
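
closeSubtransactions() above folds every failure into one IllegalStateException, attaching any later failures as suppressed exceptions. A minimal self-contained sketch of that idiom, with a hypothetical Tx interface in place of DOMStoreTransaction, might read:

    import java.util.Arrays;
    import java.util.List;

    public class SuppressedCloseSketch {
        interface Tx extends AutoCloseable {
            @Override void close();
        }

        // Close every subtransaction, collecting secondary failures as suppressed
        // exceptions on the first one (same shape as closeSubtransactions() above).
        static void closeAll(List<Tx> txs) {
            IllegalStateException failure = null;
            for (Tx tx : txs) {
                try {
                    tx.close();
                } catch (Exception e) {
                    if (failure == null) {
                        failure = new IllegalStateException("Uncaught exception occurred during closing transaction", e);
                    } else {
                        failure.addSuppressed(e);
                    }
                }
            }
            if (failure != null) {
                throw failure;
            }
        }

        public static void main(String[] args) {
            Tx ok = () -> { };
            Tx bad = () -> { throw new IllegalStateException("boom"); };
            try {
                closeAll(Arrays.asList(ok, bad, bad));
            } catch (IllegalStateException e) {
                System.out.println(e.getSuppressed().length + " suppressed failure(s)");  // 1
            }
        }
    }
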
index d354cca005974c332c2814c76e696f774b772bc4..c1ac0e1a1fd7cbc0cce85f10ed934833a35823ec 100644 (file)
@@ -7,11 +7,11 @@
  */
 package org.opendaylight.controller.md.sal.dom.broker.impl;
 
+import com.google.common.base.Preconditions;
+import java.util.EnumMap;
 import java.util.Map;
 import java.util.Map.Entry;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
@@ -21,9 +21,6 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransactio
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionFactory;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-
 /**
  *
  * Abstract composite transaction factory.
@@ -40,14 +37,15 @@ import com.google.common.collect.ImmutableMap;
  * @param <T>
  *            Type of {@link DOMStoreTransactionFactory} factory.
  */
-public abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreTransactionFactory> implements DOMDataCommitImplementation, AutoCloseable {
-
-    private final ImmutableMap<LogicalDatastoreType, T> storeTxFactories;
-
-    private boolean closed;
+abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreTransactionFactory> implements DOMDataCommitImplementation, AutoCloseable {
+    @SuppressWarnings("rawtypes")
+    private static final AtomicIntegerFieldUpdater<AbstractDOMForwardedTransactionFactory> UPDATER =
+            AtomicIntegerFieldUpdater.newUpdater(AbstractDOMForwardedTransactionFactory.class, "closed");
+    private final Map<LogicalDatastoreType, T> storeTxFactories;
+    private volatile int closed = 0;
 
     protected AbstractDOMForwardedTransactionFactory(final Map<LogicalDatastoreType, ? extends T> txFactories) {
-        this.storeTxFactories = ImmutableMap.copyOf(txFactories);
+        this.storeTxFactories = new EnumMap<>(txFactories);
     }
 
     /**
@@ -74,17 +72,16 @@ public abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreT
      *
      * @return New composite read-only transaction.
      */
-    public DOMDataReadOnlyTransaction newReadOnlyTransaction() {
+    public final DOMDataReadOnlyTransaction newReadOnlyTransaction() {
         checkNotClosed();
-        ImmutableMap.Builder<LogicalDatastoreType, DOMStoreReadTransaction> builder = ImmutableMap.builder();
+
+        final Map<LogicalDatastoreType, DOMStoreReadTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
         for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
-            builder.put(store.getKey(), store.getValue().newReadOnlyTransaction());
+            txns.put(store.getKey(), store.getValue().newReadOnlyTransaction());
         }
-        return new DOMForwardedReadOnlyTransaction(newTransactionIdentifier(), builder.build());
+        return new DOMForwardedReadOnlyTransaction(newTransactionIdentifier(), txns);
     }
 
-
-
     /**
      * Creates a new composite write-only transaction
      *
@@ -124,14 +121,14 @@ public abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreT
      * @return New composite write-only transaction associated with this
      *         factory.
      */
-    public DOMDataWriteTransaction newWriteOnlyTransaction() {
+    public final DOMDataWriteTransaction newWriteOnlyTransaction() {
         checkNotClosed();
-        ImmutableMap.Builder<LogicalDatastoreType, DOMStoreWriteTransaction> builder = ImmutableMap.builder();
+
+        final Map<LogicalDatastoreType, DOMStoreWriteTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
         for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
-            builder.put(store.getKey(), store.getValue().newWriteOnlyTransaction());
+            txns.put(store.getKey(), store.getValue().newWriteOnlyTransaction());
         }
-        return new DOMForwardedWriteTransaction<DOMStoreWriteTransaction>(newTransactionIdentifier(), builder.build(),
-                this);
+        return new DOMForwardedWriteTransaction<DOMStoreWriteTransaction>(newTransactionIdentifier(), txns, this);
     }
 
     /**
@@ -177,15 +174,15 @@ public abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreT
      *
      * @return New composite read-write transaction associated with this
      *         factory.
-     *
      */
-    public DOMDataReadWriteTransaction newReadWriteTransaction() {
+    public final DOMDataReadWriteTransaction newReadWriteTransaction() {
         checkNotClosed();
-        ImmutableMap.Builder<LogicalDatastoreType, DOMStoreReadWriteTransaction> builder = ImmutableMap.builder();
+
+        final Map<LogicalDatastoreType, DOMStoreReadWriteTransaction> txns = new EnumMap<>(LogicalDatastoreType.class);
         for (Entry<LogicalDatastoreType, T> store : storeTxFactories.entrySet()) {
-            builder.put(store.getKey(), store.getValue().newReadWriteTransaction());
+            txns.put(store.getKey(), store.getValue().newReadWriteTransaction());
         }
-        return new DOMForwardedReadWriteTransaction(newTransactionIdentifier(), builder.build(), this);
+        return new DOMForwardedReadWriteTransaction(newTransactionIdentifier(), txns, this);
     }
 
     /**
@@ -203,21 +200,19 @@ public abstract class AbstractDOMForwardedTransactionFactory<T extends DOMStoreT
     }
 
     /**
-     *
      * Checks if instance is not closed.
      *
      * @throws IllegalStateException If instance of this class was closed.
      *
      */
-    @GuardedBy("this")
-    protected synchronized void checkNotClosed() {
-        Preconditions.checkState(!closed,"Transaction factory was closed. No further operations allowed.");
+    protected final void checkNotClosed() {
+        Preconditions.checkState(closed == 0, "Transaction factory was closed. No further operations allowed.");
     }
 
     @Override
-    @GuardedBy("this")
-    public synchronized void close() {
-        closed = true;
+    public void close() {
+        final boolean success = UPDATER.compareAndSet(this, 0, 1);
+        Preconditions.checkState(success, "Transaction factory was already closed");
     }
-
 }
+
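
The factory now tracks its closed state with a volatile int and an AtomicIntegerFieldUpdater instead of a synchronized boolean. A standalone sketch of that close-once idiom (hypothetical CloseOnceSketch class, not part of this change):

    import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;

    public class CloseOnceSketch implements AutoCloseable {
        // One shared updater per class instead of an AtomicInteger per instance:
        // the flag costs a single volatile int field on each object.
        private static final AtomicIntegerFieldUpdater<CloseOnceSketch> CLOSED_UPDATER =
                AtomicIntegerFieldUpdater.newUpdater(CloseOnceSketch.class, "closed");

        private volatile int closed = 0;

        void checkNotClosed() {
            if (closed != 0) {
                throw new IllegalStateException("Factory was closed. No further operations allowed.");
            }
        }

        @Override
        public void close() {
            if (!CLOSED_UPDATER.compareAndSet(this, 0, 1)) {
                throw new IllegalStateException("Factory was already closed");
            }
        }

        public static void main(String[] args) {
            CloseOnceSketch sketch = new CloseOnceSketch();
            sketch.checkNotClosed();   // fine
            sketch.close();            // flips 0 -> 1 exactly once
            try {
                sketch.close();        // second close fails fast
            } catch (IllegalStateException expected) {
                System.out.println(expected.getMessage());
            }
        }
    }
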
index d63d6cbe3674fb44e9131e8cbf99dc52453e5ae5..8ed52061328bab92eb9074fabc9df010f7f0a9d7 100644 (file)
@@ -8,10 +8,13 @@
 package org.opendaylight.controller.md.sal.dom.broker.impl;
 
 import static com.google.common.base.Preconditions.checkState;
-
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import java.util.EnumMap;
+import java.util.Map;
 import java.util.Map.Entry;
 import java.util.concurrent.atomic.AtomicLong;
-
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
@@ -28,11 +31,6 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-
 public class DOMDataBrokerImpl extends AbstractDOMForwardedTransactionFactory<DOMStore> implements DOMDataBroker,
         AutoCloseable {
 
@@ -43,13 +41,13 @@ public class DOMDataBrokerImpl extends AbstractDOMForwardedTransactionFactory<DO
     private final AtomicLong chainNum = new AtomicLong();
     private volatile AutoCloseable closeable;
 
-    public DOMDataBrokerImpl(final ImmutableMap<LogicalDatastoreType, DOMStore> datastores,
+    public DOMDataBrokerImpl(final Map<LogicalDatastoreType, DOMStore> datastores,
             final ListeningExecutorService executor) {
         super(datastores);
         this.coordinator = new DOMDataCommitCoordinatorImpl(executor);
     }
 
-    public void setCloseable(AutoCloseable closeable) {
+    public void setCloseable(final AutoCloseable closeable) {
         this.closeable = closeable;
     }
 
@@ -86,13 +84,14 @@ public class DOMDataBrokerImpl extends AbstractDOMForwardedTransactionFactory<DO
 
     @Override
     public DOMTransactionChain createTransactionChain(final TransactionChainListener listener) {
-        ImmutableMap.Builder<LogicalDatastoreType, DOMStoreTransactionChain> backingChainsBuilder = ImmutableMap
-                .builder();
+        checkNotClosed();
+
+        final Map<LogicalDatastoreType, DOMStoreTransactionChain> backingChains = new EnumMap<>(LogicalDatastoreType.class);
         for (Entry<LogicalDatastoreType, DOMStore> entry : getTxFactories().entrySet()) {
-            backingChainsBuilder.put(entry.getKey(), entry.getValue().createTransactionChain());
+            backingChains.put(entry.getKey(), entry.getValue().createTransactionChain());
         }
-        long chainId = chainNum.getAndIncrement();
-        ImmutableMap<LogicalDatastoreType, DOMStoreTransactionChain> backingChains = backingChainsBuilder.build();
+
+        final long chainId = chainNum.getAndIncrement();
         LOG.debug("Transactoin chain {} created with listener {}, backing store chains {}", chainId, listener,
                 backingChains);
         return new DOMDataBrokerTransactionChainImpl(chainId, backingChains, coordinator, listener);
index 227693ca4df5015f79d8f88cf02e666be7f25e39..7cd6afa466e7d57b57f6861ac12aabf30bf90347 100644 (file)
@@ -6,10 +6,11 @@
  */
 package org.opendaylight.controller.md.sal.dom.broker.impl;
 
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
-
-import javax.annotation.concurrent.GuardedBy;
-
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
@@ -20,11 +21,6 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
 /**
  * NormalizedNode implementation of {@link org.opendaylight.controller.md.sal.common.api.data.TransactionChain} which is backed
  * by several {@link DOMStoreTransactionChain} differentiated by provided
@@ -35,12 +31,12 @@ public class DOMDataBrokerTransactionChainImpl extends AbstractDOMForwardedTrans
         implements DOMTransactionChain, DOMDataCommitErrorListener {
 
     private static final Logger LOG = LoggerFactory.getLogger(DOMDataBrokerTransactionChainImpl.class);
+    private final AtomicLong txNum = new AtomicLong();
     private final DOMDataCommitExecutor coordinator;
     private final TransactionChainListener listener;
     private final long chainId;
-    private final AtomicLong txNum = new AtomicLong();
-    @GuardedBy("this")
-    private boolean failed = false;
+
+    private volatile boolean failed = false;
 
     /**
      *
@@ -58,7 +54,7 @@ public class DOMDataBrokerTransactionChainImpl extends AbstractDOMForwardedTrans
      *             If any of arguments is null.
      */
     public DOMDataBrokerTransactionChainImpl(final long chainId,
-            final ImmutableMap<LogicalDatastoreType, DOMStoreTransactionChain> chains,
+            final Map<LogicalDatastoreType, DOMStoreTransactionChain> chains,
             final DOMDataCommitExecutor coordinator, final TransactionChainListener listener) {
         super(chains);
         this.chainId = chainId;
@@ -72,26 +68,30 @@ public class DOMDataBrokerTransactionChainImpl extends AbstractDOMForwardedTrans
     }
 
     @Override
-    public synchronized CheckedFuture<Void,TransactionCommitFailedException> submit(
+    public CheckedFuture<Void,TransactionCommitFailedException> submit(
             final DOMDataWriteTransaction transaction, final Iterable<DOMStoreThreePhaseCommitCohort> cohorts) {
+        checkNotClosed();
+
         return coordinator.submit(transaction, cohorts, Optional.<DOMDataCommitErrorListener> of(this));
     }
 
     @Override
-    public synchronized void close() {
+    public void close() {
         super.close();
+
         for (DOMStoreTransactionChain subChain : getTxFactories().values()) {
             subChain.close();
         }
 
         if (!failed) {
             LOG.debug("Transaction chain {} successfully finished.", this);
+            // FIXME: this event should be emitted once all operations complete
             listener.onTransactionChainSuccessful(this);
         }
     }
 
     @Override
-    public synchronized void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
+    public void onCommitFailed(final DOMDataWriteTransaction tx, final Throwable cause) {
         failed = true;
         LOG.debug("Transaction chain {} failed.", this, cause);
         listener.onTransactionChainFailed(this, tx, cause);
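
The chain's failed flag is now a plain volatile boolean written by onCommitFailed() and read by close(). A small sketch of that visibility pattern, with a hypothetical class and a simulated commit thread:

    import java.util.concurrent.CountDownLatch;

    public class VolatileFailureFlagSketch {
        // volatile: the write from the commit thread is visible to whichever thread
        // later calls close(), without taking a lock in the listener callback path.
        private volatile boolean failed = false;

        void onCommitFailed(Throwable cause) {
            failed = true;
        }

        void close() {
            System.out.println(failed ? "chain had a failed transaction" : "chain finished successfully");
        }

        public static void main(String[] args) throws InterruptedException {
            final VolatileFailureFlagSketch chain = new VolatileFailureFlagSketch();
            final CountDownLatch done = new CountDownLatch(1);

            // Simulate the commit executor reporting a failure on another thread.
            new Thread(new Runnable() {
                @Override public void run() {
                    chain.onCommitFailed(new RuntimeException("simulated commit failure"));
                    done.countDown();
                }
            }).start();

            done.await();   // the demo simply waits for the failure to be reported
            chain.close();  // prints "chain had a failed transaction"
        }
    }
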
index 3fde8d360f8af6df8cb0bcd705a9e3289d9fd35e..77cf105ed6a6e676819593dd19ccfcdc8580897d 100644 (file)
@@ -6,13 +6,18 @@
  */
 package org.opendaylight.controller.md.sal.dom.broker.impl;
 
-import java.util.List;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Throwables;
+import com.google.common.collect.Iterables;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.RejectedExecutionException;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataWriteTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
@@ -21,17 +26,6 @@ import org.opendaylight.yangtools.util.concurrent.MappingCheckedFuture;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Function;
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableList.Builder;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-
 /**
  *
  * Implementation of blocking three phase commit coordinator, which which
@@ -49,28 +43,8 @@ import com.google.common.util.concurrent.ListeningExecutorService;
 public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor {
 
     private static final Logger LOG = LoggerFactory.getLogger(DOMDataCommitCoordinatorImpl.class);
-
-    /**
-     * Runs AND binary operation between all booleans in supplied iteration of booleans.
-     *
-     * This method will stop evaluating iterables if first found is false.
-     */
-    private static final Function<Iterable<Boolean>, Boolean> AND_FUNCTION = new Function<Iterable<Boolean>, Boolean>() {
-
-        @Override
-        public Boolean apply(final Iterable<Boolean> input) {
-            for(boolean value : input) {
-               if(!value) {
-                   return Boolean.FALSE;
-               }
-            }
-            return Boolean.TRUE;
-        }
-    };
-
-    private final ListeningExecutorService executor;
-
     private final DurationStatsTracker commitStatsTracker = new DurationStatsTracker();
+    private final ListeningExecutorService executor;
 
     /**
      *
@@ -153,19 +127,17 @@ public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor {
     }
 
     /**
-     *
      * Implementation of blocking three-phase commit-coordination tasks without
-     * support of cancelation.
-     *
+     * support of cancellation.
      */
-    private static class CommitCoordinationTask implements Callable<Void> {
-
+    private static final class CommitCoordinationTask implements Callable<Void> {
+        private static final AtomicReferenceFieldUpdater<CommitCoordinationTask, CommitPhase> PHASE_UPDATER =
+                AtomicReferenceFieldUpdater.newUpdater(CommitCoordinationTask.class, CommitPhase.class, "currentPhase");
         private final DOMDataWriteTransaction tx;
         private final Iterable<DOMStoreThreePhaseCommitCohort> cohorts;
         private final DurationStatsTracker commitStatTracker;
-
-        @GuardedBy("this")
-        private CommitPhase currentPhase;
+        private final int cohortSize;
+        private volatile CommitPhase currentPhase = CommitPhase.SUBMITTED;
 
         public CommitCoordinationTask(final DOMDataWriteTransaction transaction,
                 final Iterable<DOMStoreThreePhaseCommitCohort> cohorts,
@@ -173,25 +145,26 @@ public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor {
                 final DurationStatsTracker commitStatTracker) {
             this.tx = Preconditions.checkNotNull(transaction, "transaction must not be null");
             this.cohorts = Preconditions.checkNotNull(cohorts, "cohorts must not be null");
-            this.currentPhase = CommitPhase.SUBMITTED;
             this.commitStatTracker = commitStatTracker;
+            this.cohortSize = Iterables.size(cohorts);
         }
 
         @Override
         public Void call() throws TransactionCommitFailedException {
+            final long startTime = commitStatTracker != null ? System.nanoTime() : 0;
 
-            long startTime = System.nanoTime();
             try {
                 canCommitBlocking();
                 preCommitBlocking();
                 commitBlocking();
                 return null;
             } catch (TransactionCommitFailedException e) {
-                LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), currentPhase, e);
-                abortBlocking(e);
+                final CommitPhase phase = currentPhase;
+                LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), phase, e);
+                abortBlocking(e, phase);
                 throw e;
             } finally {
-                if(commitStatTracker != null) {
+                if (commitStatTracker != null) {
                     commitStatTracker.addDuration(System.nanoTime() - startTime);
                 }
             }
@@ -210,78 +183,63 @@ public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor {
          *
          */
         private void canCommitBlocking() throws TransactionCommitFailedException {
-            final Boolean canCommitResult = canCommitAll().checkedGet();
-            if (!canCommitResult) {
-                throw new TransactionCommitFailedException("Can Commit failed, no detailed cause available.");
+            for (ListenableFuture<?> canCommit : canCommitAll()) {
+                try {
+                    final Boolean result = (Boolean)canCommit.get();
+                    if (result == null || !result) {
+                        throw new TransactionCommitFailedException("Can Commit failed, no detailed cause available.");
+                    }
+                } catch (InterruptedException | ExecutionException e) {
+                    throw TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER.apply(e);
+                }
             }
         }
 
         /**
          *
-         * Invokes preCommit on underlying cohorts and blocks till
-         * all results are returned.
+         * Invokes canCommit on underlying cohorts and returns their futures;
+         * the can-commit phase succeeds if and only if
+         * all cohorts returned true.
          *
-         * Valid state transition is from CAN_COMMIT to PRE_COMMIT, if current
-         * state is not CAN_COMMIT
-         * throws IllegalStateException.
+         * Valid state transition is from SUBMITTED to CAN_COMMIT,
+         * if currentPhase is not SUBMITTED throws IllegalStateException.
          *
-         * @throws TransactionCommitFailedException
-         *             If one of cohorts failed preCommit
+         * @return List of all cohorts' futures from the can-commit phase.
          *
          */
-        private void preCommitBlocking() throws TransactionCommitFailedException {
-            preCommitAll().checkedGet();
+        private ListenableFuture<?>[] canCommitAll() {
+            changeStateFrom(CommitPhase.SUBMITTED, CommitPhase.CAN_COMMIT);
+
+            final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+            int i = 0;
+            for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
+                ops[i++] = cohort.canCommit();
+            }
+            return ops;
         }
 
         /**
          *
-         * Invokes commit on underlying cohorts and blocks till
+         * Invokes preCommit on underlying cohorts and blocks till
          * all results are returned.
          *
-         * Valid state transition is from PRE_COMMIT to COMMIT, if not throws
-         * IllegalStateException.
+         * Valid state transition is from CAN_COMMIT to PRE_COMMIT, if current
+         * state is not CAN_COMMIT
+         * throws IllegalStateException.
          *
          * @throws TransactionCommitFailedException
          *             If one of cohorts failed preCommit
          *
          */
-        private void commitBlocking() throws TransactionCommitFailedException {
-            commitAll().checkedGet();
-        }
-
-        /**
-         * Aborts transaction.
-         *
-         * Invokes {@link DOMStoreThreePhaseCommitCohort#abort()} on all
-         * cohorts, blocks
-         * for all results. If any of the abort failed throws
-         * IllegalStateException,
-         * which will contains originalCause as suppressed Exception.
-         *
-         * If aborts we're successful throws supplied exception
-         *
-         * @param originalCause
-         *            Exception which should be used to fail transaction for
-         *            consumers of transaction
-         *            future and listeners of transaction failure.
-         * @throws TransactionCommitFailedException
-         *             on invocation of this method.
-         *             originalCa
-         * @throws IllegalStateException
-         *             if abort failed.
-         */
-        private void abortBlocking(final TransactionCommitFailedException originalCause)
-                throws TransactionCommitFailedException {
-            LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), currentPhase, originalCause);
-            Exception cause = originalCause;
+        private void preCommitBlocking() throws TransactionCommitFailedException {
+            final ListenableFuture<?>[] preCommitFutures = preCommitAll();
             try {
-                abortAsyncAll().get();
+                for(ListenableFuture<?> future : preCommitFutures) {
+                    future.get();
+                }
             } catch (InterruptedException | ExecutionException e) {
-                LOG.error("Tx: {} Error during Abort.", tx.getIdentifier(), e);
-                cause = new IllegalStateException("Abort failed.", e);
-                cause.addSuppressed(e);
+                throw TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER.apply(e);
             }
-            Throwables.propagateIfPossible(cause, TransactionCommitFailedException.class);
         }
 
         /**
@@ -295,27 +253,41 @@ public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor {
          * state is not CAN_COMMIT
          * throws IllegalStateException.
          *
-         * @return Future which will complete once all cohorts completed
-         *         preCommit.
-         *         Future throws TransactionCommitFailedException
-         *         If any of cohorts failed preCommit
+         * @return List of all cohorts' futures from the pre-commit phase.
          *
          */
-        private CheckedFuture<Void, TransactionCommitFailedException> preCommitAll() {
+        private ListenableFuture<?>[] preCommitAll() {
             changeStateFrom(CommitPhase.CAN_COMMIT, CommitPhase.PRE_COMMIT);
-            Builder<ListenableFuture<Void>> ops = ImmutableList.builder();
+
+            final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+            int i = 0;
             for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
-                ops.add(cohort.preCommit());
+                ops[i++] = cohort.preCommit();
+            }
+            return ops;
+        }
+
+        /**
+         *
+         * Invokes commit on underlying cohorts and blocks till
+         * all results are returned.
+         *
+         * Valid state transition is from PRE_COMMIT to COMMIT, if not throws
+         * IllegalStateException.
+         *
+         * @throws TransactionCommitFailedException
+         *             If one of the cohorts failed to commit
+         *
+         */
+        private void commitBlocking() throws TransactionCommitFailedException {
+            final ListenableFuture<?>[] commitFutures = commitAll();
+            try {
+                for(ListenableFuture<?> future : commitFutures) {
+                    future.get();
+                }
+            } catch (InterruptedException | ExecutionException e) {
+                throw TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER.apply(e);
             }
-            /*
-             * We are returing all futures as list, not only succeeded ones in
-             * order to fail composite future if any of them failed.
-             * See Futures.allAsList for this description.
-             */
-            @SuppressWarnings({ "unchecked", "rawtypes" })
-            ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops.build());
-            return MappingCheckedFuture.create(compositeResult,
-                                         TransactionCommitFailedExceptionMapper.PRE_COMMIT_MAPPER);
         }
 
         /**
@@ -327,80 +299,80 @@ public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor {
          * Valid state transition is from PRE_COMMIT to COMMIT, if not throws
          * IllegalStateException
          *
-         * @return Future which will complete once all cohorts completed
-         *         commit.
-         *         Future throws TransactionCommitFailedException
-         *         If any of cohorts failed preCommit
+         * @return List of all cohorts' futures from the commit phase.
          *
          */
-        private CheckedFuture<Void, TransactionCommitFailedException> commitAll() {
+        private ListenableFuture<?>[] commitAll() {
             changeStateFrom(CommitPhase.PRE_COMMIT, CommitPhase.COMMIT);
-            Builder<ListenableFuture<Void>> ops = ImmutableList.builder();
+
+            final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+            int i = 0;
             for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
-                ops.add(cohort.commit());
+                ops[i++] = cohort.commit();
             }
-            /*
-             * We are returing all futures as list, not only succeeded ones in
-             * order to fail composite future if any of them failed.
-             * See Futures.allAsList for this description.
-             */
-            @SuppressWarnings({ "unchecked", "rawtypes" })
-            ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops.build());
-            return MappingCheckedFuture.create(compositeResult,
-                                     TransactionCommitFailedExceptionMapper.COMMIT_ERROR_MAPPER);
+            return ops;
         }
 
         /**
+         * Aborts transaction.
          *
-         * Invokes canCommit on underlying cohorts and returns composite future
-         * which will contains {@link Boolean#TRUE} only and only if
-         * all cohorts returned true.
-         *
-         * Valid state transition is from SUBMITTED to CAN_COMMIT,
-         * if currentPhase is not SUBMITTED throws IllegalStateException.
+         * Invokes {@link DOMStoreThreePhaseCommitCohort#abort()} on all
+         * cohorts and blocks for all results.
+         * If any of the aborts fails, throws
+         * IllegalStateException,
+         * which will contain originalCause as a suppressed exception.
          *
-         * @return Future which will complete once all cohorts completed
-         *         preCommit.
-         *         Future throws TransactionCommitFailedException
-         *         If any of cohorts failed preCommit
+         * If the aborts were successful, throws the supplied exception.
          *
+         * @param originalCause
+         *            Exception which should be used to fail transaction for
+         *            consumers of transaction
+         *            future and listeners of transaction failure.
+         * @param phase phase in which the problem ensued
+         * @throws TransactionCommitFailedException
+         *             on invocation of this method,
+         *             rethrowing originalCause.
+         * @throws IllegalStateException
+         *             if abort failed.
          */
-        private CheckedFuture<Boolean, TransactionCommitFailedException> canCommitAll() {
-            changeStateFrom(CommitPhase.SUBMITTED, CommitPhase.CAN_COMMIT);
-            Builder<ListenableFuture<Boolean>> canCommitOperations = ImmutableList.builder();
-            for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
-                canCommitOperations.add(cohort.canCommit());
+        private void abortBlocking(final TransactionCommitFailedException originalCause, final CommitPhase phase)
+                throws TransactionCommitFailedException {
+            LOG.warn("Tx: {} Error during phase {}, starting Abort", tx.getIdentifier(), phase, originalCause);
+            Exception cause = originalCause;
+            try {
+                abortAsyncAll(phase).get();
+            } catch (InterruptedException | ExecutionException e) {
+                LOG.error("Tx: {} Error during Abort.", tx.getIdentifier(), e);
+                cause = new IllegalStateException("Abort failed.", e);
+                cause.addSuppressed(e);
             }
-            ListenableFuture<List<Boolean>> allCanCommits = Futures.allAsList(canCommitOperations.build());
-            ListenableFuture<Boolean> allSuccessFuture = Futures.transform(allCanCommits, AND_FUNCTION);
-            return MappingCheckedFuture.create(allSuccessFuture,
-                                       TransactionCommitFailedExceptionMapper.CAN_COMMIT_ERROR_MAPPER);
-
+            Throwables.propagateIfPossible(cause, TransactionCommitFailedException.class);
         }
 
         /**
-         *
          * Invokes abort on underlying cohorts and returns future which
-         * completes
-         * once all abort on cohorts are completed.
+         * completes once all aborts on the cohorts have completed.
          *
+         * @param phase phase in which the problem ensued
          * @return Future which will complete once all cohorts completed
          *         abort.
-         *
          */
-        private ListenableFuture<Void> abortAsyncAll() {
-            changeStateFrom(currentPhase, CommitPhase.ABORT);
-            Builder<ListenableFuture<Void>> ops = ImmutableList.builder();
+        private ListenableFuture<Void> abortAsyncAll(final CommitPhase phase) {
+            changeStateFrom(phase, CommitPhase.ABORT);
+
+            final ListenableFuture<?>[] ops = new ListenableFuture<?>[cohortSize];
+            int i = 0;
             for (DOMStoreThreePhaseCommitCohort cohort : cohorts) {
-                ops.add(cohort.abort());
+                ops[i++] = cohort.abort();
             }
+
             /*
-             * We are returing all futures as list, not only succeeded ones in
+             * We are returning all futures as list, not only succeeded ones in
              * order to fail composite future if any of them failed.
              * See Futures.allAsList for this description.
              */
             @SuppressWarnings({ "unchecked", "rawtypes" })
-            ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops.build());
+            ListenableFuture<Void> compositeResult = (ListenableFuture) Futures.allAsList(ops);
             return compositeResult;
         }
 
@@ -423,14 +395,13 @@ public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor {
          * @throws IllegalStateException
          *             If currentState of task does not match expected state
          */
-        private synchronized void changeStateFrom(final CommitPhase currentExpected, final CommitPhase newState) {
-            Preconditions.checkState(currentPhase.equals(currentExpected),
-                    "Invalid state transition: Tx: %s current state: %s new state: %s", tx.getIdentifier(),
-                    currentPhase, newState);
-            LOG.debug("Transaction {}: Phase {} Started ", tx.getIdentifier(), newState);
-            currentPhase = newState;
-        };
+        private void changeStateFrom(final CommitPhase currentExpected, final CommitPhase newState) {
+            final boolean success = PHASE_UPDATER.compareAndSet(this, currentExpected, newState);
+            Preconditions.checkState(success, "Invalid state transition: Tx: %s expected: %s current: %s target: %s",
+                tx.getIdentifier(), currentExpected, currentPhase, newState);
 
+            LOG.debug("Transaction {}: Phase {} Started", tx.getIdentifier(), newState);
+        }
     }
 
 }
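
For context, a minimal, self-contained sketch of the lock-free transition pattern the reworked changeStateFrom() relies on. PhasedTask and its Phase enum are illustrative stand-ins, not actual controller types; only the AtomicReferenceFieldUpdater usage mirrors the code above.

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

// Illustrative only: a stripped-down analogue of the PHASE_UPDATER-based
// transition in DOMDataCommitCoordinatorImpl; names here are hypothetical.
final class PhasedTask {
    enum Phase { SUBMITTED, CAN_COMMIT, PRE_COMMIT, COMMIT, ABORT }

    private static final AtomicReferenceFieldUpdater<PhasedTask, Phase> PHASE_UPDATER =
            AtomicReferenceFieldUpdater.newUpdater(PhasedTask.class, Phase.class, "phase");

    // The updated field must be volatile and non-final for the updater to accept it.
    private volatile Phase phase = Phase.SUBMITTED;

    void changeStateFrom(final Phase expected, final Phase next) {
        // A single CAS replaces the former synchronized check-then-set: it fails
        // fast if another thread has already moved the task to a different phase.
        if (!PHASE_UPDATER.compareAndSet(this, expected, next)) {
            throw new IllegalStateException("Expected phase " + expected
                    + " but was " + phase + ", cannot move to " + next);
        }
    }
}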
index 5e2a417d28ce22acc6a19a1f556f7ea2c95a4382..124bf9f0bef7de6ac3482cdf3ac907f448b998f6 100644 (file)
@@ -7,6 +7,9 @@
  */
 package org.opendaylight.controller.md.sal.dom.broker.impl;
 
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
@@ -14,12 +17,7 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
 /**
- *
  * Read Only Transaction, which is composed of several
  * {@link DOMStoreReadTransaction} transactions. Subtransaction is selected by
  * {@link LogicalDatastoreType} type parameter in
@@ -30,7 +28,7 @@ class DOMForwardedReadOnlyTransaction extends
         DOMDataReadOnlyTransaction {
 
     protected DOMForwardedReadOnlyTransaction(final Object identifier,
-            final ImmutableMap<LogicalDatastoreType, DOMStoreReadTransaction> backingTxs) {
+            final Map<LogicalDatastoreType, DOMStoreReadTransaction> backingTxs) {
         super(identifier, backingTxs);
     }
 
@@ -40,9 +38,10 @@ class DOMForwardedReadOnlyTransaction extends
         return getSubtransaction(store).read(path);
     }
 
-    @Override public CheckedFuture<Boolean, ReadFailedException> exists(
-        LogicalDatastoreType store,
-        YangInstanceIdentifier path) {
+    @Override
+    public CheckedFuture<Boolean, ReadFailedException> exists(
+        final LogicalDatastoreType store,
+        final YangInstanceIdentifier path) {
         return getSubtransaction(store).exists(path);
     }
 
@@ -50,5 +49,4 @@ class DOMForwardedReadOnlyTransaction extends
     public void close() {
         closeSubtransactions();
     }
-
 }
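
A hedged consumer-side sketch of how such a composite read-only transaction is typically used: exists() checks presence without materializing the subtree, read() fetches it, and close() releases the backing subtransactions. ReadOnlyUsage and readIfPresent are hypothetical names; only the DOMDataReadOnlyTransaction calls come from the interface shown above.

import com.google.common.base.Optional;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadOnlyTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

// Hypothetical helper class; the transaction and path are supplied by the caller.
final class ReadOnlyUsage {
    static Optional<NormalizedNode<?, ?>> readIfPresent(final DOMDataReadOnlyTransaction tx,
            final YangInstanceIdentifier path) throws ReadFailedException {
        try {
            // Cheap presence check first, then the full read only if needed.
            if (!tx.exists(LogicalDatastoreType.OPERATIONAL, path).checkedGet()) {
                return Optional.absent();
            }
            return tx.read(LogicalDatastoreType.OPERATIONAL, path).checkedGet();
        } finally {
            // Always close read-only transactions to release the subtransactions.
            tx.close();
        }
    }
}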
index 67351ec94583cda374f5b0948eafd1089fd98510..662d48afdb2c33681aa4815c64f2a6992ac6329b 100644 (file)
@@ -6,6 +6,9 @@
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */package org.opendaylight.controller.md.sal.dom.broker.impl;
 
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.util.Map;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
@@ -13,10 +16,6 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransactio
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-
 /**
  *
  * Read-Write Transaction, which is composed of several
@@ -35,12 +34,9 @@ import com.google.common.util.concurrent.CheckedFuture;
  * transactions.
  *
  */
-
-class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction<DOMStoreReadWriteTransaction> implements
-        DOMDataReadWriteTransaction {
-
+final class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction<DOMStoreReadWriteTransaction> implements DOMDataReadWriteTransaction {
     protected DOMForwardedReadWriteTransaction(final Object identifier,
-            final ImmutableMap<LogicalDatastoreType, DOMStoreReadWriteTransaction> backingTxs,
+            final Map<LogicalDatastoreType, DOMStoreReadWriteTransaction> backingTxs,
             final DOMDataCommitImplementation commitImpl) {
         super(identifier, backingTxs, commitImpl);
     }
@@ -51,9 +47,10 @@ class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction<DOMS
         return getSubtransaction(store).read(path);
     }
 
-    @Override public CheckedFuture<Boolean, ReadFailedException> exists(
-        LogicalDatastoreType store,
-        YangInstanceIdentifier path) {
+    @Override
+    public CheckedFuture<Boolean, ReadFailedException> exists(
+        final LogicalDatastoreType store,
+        final YangInstanceIdentifier path) {
         return getSubtransaction(store).exists(path);
     }
 }
index 5d4ad4d803ac90d75c3769b76ef5133e6283cf17..8c84af11ff2ec7aa44eb5b5866de6c0203f14453 100644 (file)
@@ -7,10 +7,15 @@
  */
 package org.opendaylight.controller.md.sal.dom.broker.impl;
 
-import static com.google.common.base.Preconditions.checkState;
-
-import javax.annotation.concurrent.GuardedBy;
-
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Map;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
@@ -21,18 +26,12 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListenableFuture;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
- *
- *
  * Read-Write Transaction, which is composed of several
- * {@link DOMStoreWriteTransaction} transactions. Subtransaction is selected by
+ * {@link DOMStoreWriteTransaction} transactions. A sub-transaction is selected by
  * {@link LogicalDatastoreType} type parameter in:
  *
  * <ul>
@@ -46,114 +45,106 @@ import com.google.common.util.concurrent.ListenableFuture;
  * invocation with all {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort} for underlying
  * transactions.
  *
- * @param <T>
- *            Subtype of {@link DOMStoreWriteTransaction} which is used as
+ * @param <T> Subtype of {@link DOMStoreWriteTransaction} which is used as
  *            subtransaction.
  */
 class DOMForwardedWriteTransaction<T extends DOMStoreWriteTransaction> extends
         AbstractDOMForwardedCompositeTransaction<LogicalDatastoreType, T> implements DOMDataWriteTransaction {
+    @SuppressWarnings("rawtypes")
+    private static final AtomicReferenceFieldUpdater<DOMForwardedWriteTransaction, DOMDataCommitImplementation> IMPL_UPDATER =
+            AtomicReferenceFieldUpdater.newUpdater(DOMForwardedWriteTransaction.class, DOMDataCommitImplementation.class, "commitImpl");
+    @SuppressWarnings("rawtypes")
+    private static final AtomicReferenceFieldUpdater<DOMForwardedWriteTransaction, Future> FUTURE_UPDATER =
+            AtomicReferenceFieldUpdater.newUpdater(DOMForwardedWriteTransaction.class, Future.class, "commitFuture");
+    private static final Logger LOG = LoggerFactory.getLogger(DOMForwardedWriteTransaction.class);
+    private static final Future<?> CANCELLED_FUTURE = Futures.immediateCancelledFuture();
 
     /**
-     *  Implementation of real commit.
-     *
-     *  Transaction can not be commited if commitImpl is null,
-     *  so this seting this property to null is also used to
-     *  prevent write to
-     *  already commited / canceled transaction {@link #checkNotCanceled()
-     *
-     *
+     * Implementation of real commit. A non-null value also indicates that
+     * the transaction is still running; it is cleared atomically via
+     * {@link #IMPL_UPDATER} on submit or cancel.
      */
-    @GuardedBy("this")
     private volatile DOMDataCommitImplementation commitImpl;
 
     /**
+     * Future task of transaction commit. It starts off as null, but is
+     * set appropriately on {@link #submit()} and {@link #cancel()} via
+     * {@link AtomicReferenceFieldUpdater#lazySet(Object, Object)}.
      *
-     * Future task of transaction commit.
-     *
-     * This value is initially null, and is once updated if transaction
-     * is commited {@link #commit()}.
-     * If this future exists, transaction MUST not be commited again
-     * and all modifications should fail. See {@link #checkNotCommited()}.
-     *
+     * Lazy set is safe to use because the field is only referenced in the
+     * {@link #cancel()} slow path, where we will busy-wait for it. The
+     * fast path gets the benefit of a store-store barrier instead of the
+     * usual store-load barrier.
      */
-    @GuardedBy("this")
-    private volatile CheckedFuture<Void, TransactionCommitFailedException> commitFuture;
+    private volatile Future<?> commitFuture;
 
     protected DOMForwardedWriteTransaction(final Object identifier,
-            final ImmutableMap<LogicalDatastoreType, T> backingTxs, final DOMDataCommitImplementation commitImpl) {
+            final Map<LogicalDatastoreType, T> backingTxs, final DOMDataCommitImplementation commitImpl) {
         super(identifier, backingTxs);
         this.commitImpl = Preconditions.checkNotNull(commitImpl, "commitImpl must not be null.");
     }
 
     @Override
     public void put(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        checkNotReady();
+        checkRunning(commitImpl);
         getSubtransaction(store).write(path, data);
     }
 
     @Override
     public void delete(final LogicalDatastoreType store, final YangInstanceIdentifier path) {
-        checkNotReady();
+        checkRunning(commitImpl);
         getSubtransaction(store).delete(path);
     }
 
     @Override
     public void merge(final LogicalDatastoreType store, final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
-        checkNotReady();
+        checkRunning(commitImpl);
         getSubtransaction(store).merge(path, data);
     }
 
     @Override
-    public synchronized boolean cancel() {
-        // Transaction is already canceled, we are safe to return true
-        final boolean cancelationResult;
-        if (commitImpl == null && commitFuture != null) {
-            // Transaction is submitted, we try to cancel future.
-            cancelationResult = commitFuture.cancel(false);
-        } else if(commitImpl == null) {
+    public boolean cancel() {
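+        // Whichever of submit() or cancel() clears commitImpl first decides the
+        // transaction's fate: a non-null result here means we won and the
+        // transaction was never submitted.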
+        final DOMDataCommitImplementation impl = IMPL_UPDATER.getAndSet(this, null);
+        if (impl != null) {
+            LOG.trace("Transaction {} cancelled before submit", getIdentifier());
+            FUTURE_UPDATER.lazySet(this, CANCELLED_FUTURE);
             return true;
-        } else {
-            cancelationResult = true;
-            commitImpl = null;
         }
-        return cancelationResult;
 
+        // The transaction is in the process of being submitted or cancelled. Busy-wait
+        // for the corresponding future.
+        Future<?> future;
+        do {
+            future = commitFuture;
+        } while (future == null);
+
+        return future.cancel(false);
     }
 
     @Override
-    public synchronized ListenableFuture<RpcResult<TransactionStatus>> commit() {
+    public ListenableFuture<RpcResult<TransactionStatus>> commit() {
         return AbstractDataTransaction.convertToLegacyCommitFuture(submit());
     }
 
     @Override
-    public CheckedFuture<Void,TransactionCommitFailedException> submit() {
-        checkNotReady();
+    public CheckedFuture<Void, TransactionCommitFailedException> submit() {
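+        // Atomically claim the commit implementation; a null result means the
+        // transaction was already submitted or cancelled and checkRunning() fails.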
+        final DOMDataCommitImplementation impl = IMPL_UPDATER.getAndSet(this, null);
+        checkRunning(impl);
 
-        ImmutableList.Builder<DOMStoreThreePhaseCommitCohort> cohortsBuilder = ImmutableList.builder();
-        for (DOMStoreWriteTransaction subTx : getSubtransactions()) {
-            cohortsBuilder.add(subTx.ready());
-        }
-        ImmutableList<DOMStoreThreePhaseCommitCohort> cohorts = cohortsBuilder.build();
-        commitFuture = commitImpl.submit(this, cohorts);
-
-        /*
-         *We remove reference to Commit Implementation in order
-         *to prevent memory leak
-         */
-        commitImpl = null;
-        return commitFuture;
-    }
+        final Collection<T> txns = getSubtransactions();
+        final Collection<DOMStoreThreePhaseCommitCohort> cohorts = new ArrayList<>(txns.size());
 
-    private void checkNotReady() {
-        checkNotCommited();
-        checkNotCanceled();
-    }
+        // FIXME: deal with errors thrown by the backend (ready and submit can fail in theory)
+        for (DOMStoreWriteTransaction txn : txns) {
+            cohorts.add(txn.ready());
+        }
 
-    private void checkNotCanceled() {
-        Preconditions.checkState(commitImpl != null, "Transaction was canceled.");
+        final CheckedFuture<Void, TransactionCommitFailedException> ret = impl.submit(this, cohorts);
+        FUTURE_UPDATER.lazySet(this, ret);
+        return ret;
     }
 
-    private void checkNotCommited() {
-        checkState(commitFuture == null, "Transaction was already submited.");
+    private void checkRunning(final DOMDataCommitImplementation impl) {
+        Preconditions.checkState(impl != null, "Transaction %s is no longer running", getIdentifier());
     }
-}
\ No newline at end of file
+}
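
A simplified, hypothetical analogue of the submit()/cancel() hand-off implemented above, showing how the two field updaters decide ownership and why lazySet plus a busy-wait on the slow path is sufficient. OneShotSubmission and Submitter are illustrative stand-ins, not MD-SAL types.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

// Illustrative only: mirrors the IMPL_UPDATER/FUTURE_UPDATER pattern above.
final class OneShotSubmission {
    interface Submitter {
        ListenableFuture<Void> submit();
    }

    private static final AtomicReferenceFieldUpdater<OneShotSubmission, Submitter> IMPL_UPDATER =
            AtomicReferenceFieldUpdater.newUpdater(OneShotSubmission.class, Submitter.class, "impl");
    @SuppressWarnings("rawtypes")
    private static final AtomicReferenceFieldUpdater<OneShotSubmission, Future> FUTURE_UPDATER =
            AtomicReferenceFieldUpdater.newUpdater(OneShotSubmission.class, Future.class, "future");
    private static final Future<?> CANCELLED = Futures.immediateCancelledFuture();

    private volatile Submitter impl;
    private volatile Future<?> future;

    OneShotSubmission(final Submitter impl) {
        this.impl = impl;
    }

    ListenableFuture<Void> submit() {
        // getAndSet(null) is the single decision point: whichever of submit()
        // and cancel() clears impl first owns the rest of the lifecycle.
        final Submitter claimed = IMPL_UPDATER.getAndSet(this, null);
        if (claimed == null) {
            throw new IllegalStateException("Already submitted or cancelled");
        }
        final ListenableFuture<Void> ret = claimed.submit();
        // lazySet (store-store barrier) is enough because the only reader is
        // the cancel() slow path below, which busy-waits until it sees a value.
        FUTURE_UPDATER.lazySet(this, ret);
        return ret;
    }

    boolean cancel() {
        if (IMPL_UPDATER.getAndSet(this, null) != null) {
            // Cancelled before submission ever happened.
            FUTURE_UPDATER.lazySet(this, CANCELLED);
            return true;
        }
        // Lost the race: wait for the winner to publish its future, then cancel it.
        Future<?> f;
        do {
            f = future;
        } while (f == null);
        return f.cancel(false);
    }
}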
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/HashMapDataStore.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/HashMapDataStore.java
deleted file mode 100644 (file)
index 1f82bd7..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.dom.broker.impl;
-
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
-import org.opendaylight.controller.md.sal.common.api.data.DataModification;
-import org.opendaylight.controller.sal.core.api.data.DataStore;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-public final class HashMapDataStore implements DataStore, AutoCloseable {
-    private static final Logger LOG = LoggerFactory
-            .getLogger(HashMapDataStore.class);
-
-    private final Map<YangInstanceIdentifier, CompositeNode> configuration = new ConcurrentHashMap<YangInstanceIdentifier, CompositeNode>();
-    private final Map<YangInstanceIdentifier, CompositeNode> operational = new ConcurrentHashMap<YangInstanceIdentifier, CompositeNode>();
-
-    @Override
-    public boolean containsConfigurationPath(final YangInstanceIdentifier path) {
-        return configuration.containsKey(path);
-    }
-
-    @Override
-    public boolean containsOperationalPath(final YangInstanceIdentifier path) {
-        return operational.containsKey(path);
-    }
-
-    @Override
-    public Iterable<YangInstanceIdentifier> getStoredConfigurationPaths() {
-        return configuration.keySet();
-    }
-
-    @Override
-    public Iterable<YangInstanceIdentifier> getStoredOperationalPaths() {
-        return operational.keySet();
-    }
-
-    @Override
-    public CompositeNode readConfigurationData(final YangInstanceIdentifier path) {
-        LOG.trace("Reading configuration path {}", path);
-        return configuration.get(path);
-    }
-
-    @Override
-    public CompositeNode readOperationalData(YangInstanceIdentifier path) {
-        LOG.trace("Reading operational path {}", path);
-        return operational.get(path);
-    }
-
-    @Override
-    public DataCommitHandler.DataCommitTransaction<YangInstanceIdentifier, CompositeNode> requestCommit(
-            final DataModification<YangInstanceIdentifier, CompositeNode> modification) {
-        return new HashMapDataStoreTransaction(modification, this);
-    }
-
-    public RpcResult<Void> rollback(HashMapDataStoreTransaction transaction) {
-        return RpcResultBuilder.<Void> success().build();
-    }
-
-    public RpcResult<Void> finish(HashMapDataStoreTransaction transaction) {
-        final DataModification<YangInstanceIdentifier, CompositeNode> modification = transaction
-                .getModification();
-        for (final YangInstanceIdentifier removal : modification
-                .getRemovedConfigurationData()) {
-            LOG.trace("Removing configuration path {}", removal);
-            remove(configuration, removal);
-        }
-        for (final YangInstanceIdentifier removal : modification
-                .getRemovedOperationalData()) {
-            LOG.trace("Removing operational path {}", removal);
-            remove(operational, removal);
-        }
-        if (LOG.isTraceEnabled()) {
-            for (final YangInstanceIdentifier a : modification
-                    .getUpdatedConfigurationData().keySet()) {
-                LOG.trace("Adding configuration path {}", a);
-            }
-            for (final YangInstanceIdentifier a : modification
-                    .getUpdatedOperationalData().keySet()) {
-                LOG.trace("Adding operational path {}", a);
-            }
-        }
-        configuration.putAll(modification.getUpdatedConfigurationData());
-        operational.putAll(modification.getUpdatedOperationalData());
-
-        return RpcResultBuilder.<Void> success().build();
-    }
-
-    public void remove(final Map<YangInstanceIdentifier, CompositeNode> map,
-            final YangInstanceIdentifier identifier) {
-        Set<YangInstanceIdentifier> affected = new HashSet<YangInstanceIdentifier>();
-        for (final YangInstanceIdentifier path : map.keySet()) {
-            if (identifier.contains(path)) {
-                affected.add(path);
-            }
-        }
-        for (final YangInstanceIdentifier pathToRemove : affected) {
-            LOG.trace("Removed path {}", pathToRemove);
-            map.remove(pathToRemove);
-        }
-    }
-
-    @Override
-    public void close() {
-        // NOOP
-    }
-}
diff --git a/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/HashMapDataStoreTransaction.java b/opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/HashMapDataStoreTransaction.java
deleted file mode 100644 (file)
index ee026b6..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.dom.broker.impl;
-
-import org.opendaylight.controller.md.sal.common.api.data.DataModification;
-import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler.DataCommitTransaction;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-
-public class HashMapDataStoreTransaction implements
-        DataCommitTransaction<YangInstanceIdentifier, CompositeNode> {
-    private final DataModification<YangInstanceIdentifier, CompositeNode> modification;
-    private final HashMapDataStore datastore;
-
-    HashMapDataStoreTransaction(
-            final DataModification<YangInstanceIdentifier, CompositeNode> modify,
-            final HashMapDataStore store) {
-        modification = modify;
-        datastore = store;
-    }
-
-    @Override
-    public RpcResult<Void> finish() throws IllegalStateException {
-        return datastore.finish(this);
-    }
-
-    @Override
-    public DataModification<YangInstanceIdentifier, CompositeNode> getModification() {
-        return this.modification;
-    }
-
-    @Override
-    public RpcResult<Void> rollback() throws IllegalStateException {
-        return datastore.rollback(this);
-    }
-}
\ No newline at end of file
index b1df7efcdbb6e4efb0ad6fdd2ecbf85aed2b98d1..e81f71a7d2069014b511b1162168131633d62c1a 100644 (file)
@@ -32,12 +32,6 @@ module opendaylight-sal-dom-broker-impl {
         config:provided-service sal:dom-async-data-broker;
     }
     
-    identity hash-map-data-store {
-        base config:module-type;
-        config:provided-service sal:dom-data-store;
-        config:java-name-prefix HashMapDataStore;
-    }
-    
     identity schema-service-singleton {
         base config:module-type;
         config:provided-service sal:schema-service;
@@ -129,12 +123,6 @@ module opendaylight-sal-dom-broker-impl {
         }
     }
     
-    augment "/config:modules/config:module/config:state" {
-        case hash-map-data-store {
-            when "/config:modules/config:module/config:type = 'hash-map-data-store'";
-        }
-    }
-    
     augment "/config:modules/config:module/config:state" {
         case schema-service-singleton {
             when "/config:modules/config:module/config:type = 'schema-service-singleton'";
@@ -149,4 +137,4 @@ module opendaylight-sal-dom-broker-impl {
             } 
         }
     }
-}
\ No newline at end of file
+}
index e57d08f1737fde07dc455eabfc53c2e5304cd53f..674d2ff44a24a959dd7a0ca7f3ecc6bcb5662f03 100644 (file)
@@ -1,12 +1,19 @@
 package org.opendaylight.controller.md.sal.dom.broker.impl;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertEquals;
 import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
 import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
+import com.google.common.base.Optional;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.ForwardingExecutorService;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
 import java.util.Collections;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
@@ -15,7 +22,6 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -39,15 +45,6 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
-import com.google.common.base.Optional;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.ForwardingExecutorService;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-
 public class DOMBrokerTest {
 
     private SchemaContext schemaContext;
@@ -76,7 +73,7 @@ public class DOMBrokerTest {
         commitExecutor = new CommitExecutorService(Executors.newSingleThreadExecutor());
         futureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(1, 5, "FCB");
         executor = new DeadlockDetectingListeningExecutorService(commitExecutor,
-                TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION, futureExecutor);
+                TransactionCommitDeadlockException.DEADLOCK_EXCEPTION_SUPPLIER, futureExecutor);
         domBroker = new DOMDataBrokerImpl(stores, executor);
     }
 
@@ -215,19 +212,19 @@ public class DOMBrokerTest {
 
         TestDOMDataChangeListener dcListener = new TestDOMDataChangeListener() {
             @Override
-            public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+            public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
 
                 DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
                 writeTx.put( OPERATIONAL, TestModel.TEST2_PATH,
                              ImmutableNodes.containerNode( TestModel.TEST2_QNAME ) );
                 Futures.addCallback( writeTx.submit(), new FutureCallback<Void>() {
                     @Override
-                    public void onSuccess( Void result ) {
+                    public void onSuccess( final Void result ) {
                         commitCompletedLatch.countDown();
                     }
 
                     @Override
-                    public void onFailure( Throwable t ) {
+                    public void onFailure( final Throwable t ) {
                         caughtCommitEx.set( t );
                         commitCompletedLatch.countDown();
                     }
@@ -271,7 +268,7 @@ public class DOMBrokerTest {
 
         TestDOMDataChangeListener dcListener = new TestDOMDataChangeListener() {
             @Override
-            public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+            public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
                 DOMDataWriteTransaction writeTx = domBroker.newWriteOnlyTransaction();
                 writeTx.put( OPERATIONAL, TestModel.TEST2_PATH,
                              ImmutableNodes.containerNode( TestModel.TEST2_QNAME ) );
@@ -333,7 +330,7 @@ public class DOMBrokerTest {
         private final CountDownLatch latch = new CountDownLatch( 1 );
 
         @Override
-        public void onDataChanged( AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
+        public void onDataChanged( final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change ) {
             this.change = change;
             latch.countDown();
         }
@@ -347,7 +344,7 @@ public class DOMBrokerTest {
 
         ExecutorService delegate;
 
-        public CommitExecutorService( ExecutorService delegate ) {
+        public CommitExecutorService( final ExecutorService delegate ) {
             this.delegate = delegate;
         }
 
index d79693030221a256d963b12b7f1b8de9783b421b..613b7a60ab986521c271e3055f68476dab69d37d 100644 (file)
@@ -9,6 +9,7 @@
 package org.opendaylight.controller.md.sal.dom.broker.impl.jmx;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 
 import org.junit.Test;
 import org.opendaylight.yangtools.util.DurationStatsTracker;
@@ -29,13 +30,9 @@ public class CommitStatsMXBeanImplTest {
 
         commitStatsTracker.addDuration(100);
 
-        String prefix = "100.0 ns";
         assertEquals("getTotalCommits", 1L, bean.getTotalCommits());
-        assertEquals("getLongestCommitTime starts with \"" + prefix + "\"", true,
-                     bean.getLongestCommitTime().startsWith("100.0 ns"));
-        assertEquals("getShortestCommitTime starts with \"" + prefix + "\"", true,
-                     bean.getShortestCommitTime().startsWith(prefix));
-        assertEquals("getAverageCommitTime starts with \"" + prefix + "\"", true,
-                     bean.getAverageCommitTime().startsWith(prefix));
+        assertNotNull(bean.getLongestCommitTime());
+        assertNotNull(bean.getShortestCommitTime());
+        assertNotNull(bean.getAverageCommitTime());
     }
 }
index 496f27ecaa1defe4f49d1cd724b58e8a3f673794..d1f11ba9a36e693e223930b2fb4269e03acf21da 100644 (file)
@@ -126,7 +126,7 @@ public class XSQLAdapter extends Thread implements SchemaContextListener {
         return this.bluePrint;
     }
 
-    public List<Object> collectModuleRoots(XSQLBluePrintNode table) {
+    public List<Object> collectModuleRoots(XSQLBluePrintNode table,LogicalDatastoreType type) {
         if (table.getParent().isModule()) {
             try {
                 List<Object> result = new LinkedList<Object>();
@@ -136,8 +136,9 @@ public class XSQLAdapter extends Thread implements SchemaContextListener {
                         .toInstance();
                 DOMDataReadTransaction t = this.domDataBroker
                         .newReadOnlyTransaction();
-                Object node = t.read(LogicalDatastoreType.OPERATIONAL,
+                Object node = t.read(type,
                         instanceIdentifier).get();
+
                 node = XSQLODLUtils.get(node, "reference");
                 if (node == null) {
                     return result;
@@ -157,14 +158,18 @@ public class XSQLAdapter extends Thread implements SchemaContextListener {
                 XSQLAdapter.log(err);
             }
         } else {
-            return collectModuleRoots(table.getParent());
+            return collectModuleRoots(table.getParent(),type);
         }
         return null;
     }
 
     public void execute(JDBCResultSet rs) {
         List<XSQLBluePrintNode> tables = rs.getTables();
-        List<Object> roots = collectModuleRoots(tables.get(0));
+        List<Object> roots = collectModuleRoots(tables.get(0),LogicalDatastoreType.OPERATIONAL);
+        roots.addAll(collectModuleRoots(tables.get(0),LogicalDatastoreType.CONFIGURATION));
+        if(roots.isEmpty()){
+            rs.setFinished(true);
+        }
         XSQLBluePrintNode main = rs.getMainTable();
         List<NETask> tasks = new LinkedList<XSQLAdapter.NETask>();
 
@@ -487,13 +492,15 @@ public class XSQLAdapter extends Thread implements SchemaContextListener {
                     out.print(prompt);
                     char c = 0;
                     byte data[] = new byte[1];
-                    while (c != '\n') {
+                    while (!socket.isClosed() && socket.isConnected() && !socket.isInputShutdown() && c != '\n') {
                         try {
                             in.read(data);
                             c = (char) data[0];
                             inputString.append(c);
                         } catch (Exception err) {
                             err.printStackTrace(out);
+                            stopped = true;
+                            break;
                         }
                     }
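
A hypothetical helper mirroring the pattern execute() now follows: the same path is read from both the OPERATIONAL and CONFIGURATION datastores and the results are merged. BothStoresReader and readBoth are illustrative names; the read() signature is that of the DOMDataReadTransaction used above.

import com.google.common.base.Optional;
import java.util.LinkedList;
import java.util.List;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.controller.md.sal.dom.api.DOMDataReadTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

// Hypothetical helper; blocking get() mirrors the style of collectModuleRoots().
final class BothStoresReader {
    static List<NormalizedNode<?, ?>> readBoth(final DOMDataReadTransaction tx,
            final YangInstanceIdentifier path) throws Exception {
        final List<NormalizedNode<?, ?>> result = new LinkedList<>();
        for (LogicalDatastoreType type : LogicalDatastoreType.values()) {
            final Optional<NormalizedNode<?, ?>> node = tx.read(type, path).get();
            if (node.isPresent()) {
                result.add(node.get());
            }
        }
        return result;
    }
}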
 
index 3e748618169889cdfcf0b9f8eb66c73e65cb4338..74fa73afb92f869f7cb2e945a625d489b71e71c2 100644 (file)
@@ -8,14 +8,12 @@
 package org.opendaylight.controller.md.sal.dom.store.impl;
 
 import static com.google.common.base.Preconditions.checkState;
-
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
@@ -62,8 +60,7 @@ import org.slf4j.LoggerFactory;
  * to implement {@link DOMStore} contract.
  *
  */
-public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, SchemaContextListener,
-        TransactionReadyPrototype,AutoCloseable {
+public class InMemoryDOMDataStore extends TransactionReadyPrototype implements DOMStore, Identifiable<String>, SchemaContextListener, AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(InMemoryDOMDataStore.class);
     private static final ListenableFuture<Void> SUCCESSFUL_FUTURE = Futures.immediateFuture(null);
 
@@ -82,29 +79,26 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
     private final DataTree dataTree = InMemoryDataTreeFactory.getInstance().create();
     private final ListenerTree listenerTree = ListenerTree.create();
     private final AtomicLong txCounter = new AtomicLong(0);
-    private final ListeningExecutorService listeningExecutor;
 
     private final QueuedNotificationManager<DataChangeListenerRegistration<?>, DOMImmutableDataChangeEvent> dataChangeListenerNotificationManager;
     private final ExecutorService dataChangeListenerExecutor;
-
-    private final ExecutorService domStoreExecutor;
+    private final ListeningExecutorService commitExecutor;
     private final boolean debugTransactions;
     private final String name;
 
     private volatile AutoCloseable closeable;
 
-    public InMemoryDOMDataStore(final String name, final ExecutorService domStoreExecutor,
+    public InMemoryDOMDataStore(final String name, final ListeningExecutorService commitExecutor,
             final ExecutorService dataChangeListenerExecutor) {
-        this(name, domStoreExecutor, dataChangeListenerExecutor,
+        this(name, commitExecutor, dataChangeListenerExecutor,
              InMemoryDOMDataStoreConfigProperties.DEFAULT_MAX_DATA_CHANGE_LISTENER_QUEUE_SIZE, false);
     }
 
-    public InMemoryDOMDataStore(final String name, final ExecutorService domStoreExecutor,
+    public InMemoryDOMDataStore(final String name, final ListeningExecutorService commitExecutor,
             final ExecutorService dataChangeListenerExecutor, final int maxDataChangeListenerQueueSize,
             final boolean debugTransactions) {
         this.name = Preconditions.checkNotNull(name);
-        this.domStoreExecutor = Preconditions.checkNotNull(domStoreExecutor);
-        this.listeningExecutor = MoreExecutors.listeningDecorator(this.domStoreExecutor);
+        this.commitExecutor = Preconditions.checkNotNull(commitExecutor);
         this.dataChangeListenerExecutor = Preconditions.checkNotNull(dataChangeListenerExecutor);
         this.debugTransactions = debugTransactions;
 
@@ -114,7 +108,7 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
                         "DataChangeListenerQueueMgr");
     }
 
-    public void setCloseable(AutoCloseable closeable) {
+    public void setCloseable(final AutoCloseable closeable) {
         this.closeable = closeable;
     }
 
@@ -123,7 +117,7 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
     }
 
     public ExecutorService getDomStoreExecutor() {
-        return domStoreExecutor;
+        return commitExecutor;
     }
 
     @Override
@@ -158,7 +152,7 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
 
     @Override
     public void close() {
-        ExecutorServiceUtil.tryGracefulShutdown(listeningExecutor, 30, TimeUnit.SECONDS);
+        ExecutorServiceUtil.tryGracefulShutdown(commitExecutor, 30, TimeUnit.SECONDS);
         ExecutorServiceUtil.tryGracefulShutdown(dataChangeListenerExecutor, 30, TimeUnit.SECONDS);
 
         if(closeable != null) {
@@ -215,80 +209,95 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
     }
 
     @Override
-    public DOMStoreThreePhaseCommitCohort ready(final SnapshotBackedWriteTransaction writeTx) {
-        LOG.debug("Tx: {} is submitted. Modifications: {}", writeTx.getIdentifier(), writeTx.getMutatedView());
-        return new ThreePhaseCommitImpl(writeTx);
+    protected void transactionAborted(final SnapshotBackedWriteTransaction tx) {
+        LOG.debug("Tx: {} is closed.", tx.getIdentifier());
+    }
+
+    @Override
+    protected DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
+        LOG.debug("Tx: {} is submitted. Modifications: {}", tx.getIdentifier(), tree);
+        return new ThreePhaseCommitImpl(tx, tree);
     }
 
     private Object nextIdentifier() {
         return name + "-" + txCounter.getAndIncrement();
     }
 
-    private class DOMStoreTransactionChainImpl implements DOMStoreTransactionChain, TransactionReadyPrototype {
-
+    private class DOMStoreTransactionChainImpl extends TransactionReadyPrototype implements DOMStoreTransactionChain {
+        @GuardedBy("this")
+        private SnapshotBackedWriteTransaction allocatedTransaction;
+        @GuardedBy("this")
+        private DataTreeSnapshot readySnapshot;
         @GuardedBy("this")
-        private SnapshotBackedWriteTransaction latestOutstandingTx;
-
         private boolean chainFailed = false;
 
+        @GuardedBy("this")
         private void checkFailed() {
             Preconditions.checkState(!chainFailed, "Transaction chain is failed.");
         }
 
-        @Override
-        public synchronized DOMStoreReadTransaction newReadOnlyTransaction() {
-            final DataTreeSnapshot snapshot;
+        @GuardedBy("this")
+        private DataTreeSnapshot getSnapshot() {
             checkFailed();
-            if (latestOutstandingTx != null) {
-                checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
-                snapshot = latestOutstandingTx.getMutatedView();
+
+            if (allocatedTransaction != null) {
+                Preconditions.checkState(readySnapshot != null, "Previous transaction %s is not ready yet", allocatedTransaction.getIdentifier());
+                return readySnapshot;
             } else {
-                snapshot = dataTree.takeSnapshot();
+                return dataTree.takeSnapshot();
             }
+        }
+
+        @GuardedBy("this")
+        private <T extends SnapshotBackedWriteTransaction> T recordTransaction(final T transaction) {
+            allocatedTransaction = transaction;
+            readySnapshot = null;
+            return transaction;
+        }
+
+        @Override
+        public synchronized DOMStoreReadTransaction newReadOnlyTransaction() {
+            final DataTreeSnapshot snapshot = getSnapshot();
             return new SnapshotBackedReadTransaction(nextIdentifier(), getDebugTransactions(), snapshot);
         }
 
         @Override
         public synchronized DOMStoreReadWriteTransaction newReadWriteTransaction() {
-            final DataTreeSnapshot snapshot;
-            checkFailed();
-            if (latestOutstandingTx != null) {
-                checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
-                snapshot = latestOutstandingTx.getMutatedView();
-            } else {
-                snapshot = dataTree.takeSnapshot();
-            }
-            final SnapshotBackedReadWriteTransaction ret = new SnapshotBackedReadWriteTransaction(nextIdentifier(),
-                    getDebugTransactions(), snapshot, this);
-            latestOutstandingTx = ret;
-            return ret;
+            final DataTreeSnapshot snapshot = getSnapshot();
+            return recordTransaction(new SnapshotBackedReadWriteTransaction(nextIdentifier(),
+                    getDebugTransactions(), snapshot, this));
         }
 
         @Override
         public synchronized DOMStoreWriteTransaction newWriteOnlyTransaction() {
-            final DataTreeSnapshot snapshot;
-            checkFailed();
-            if (latestOutstandingTx != null) {
-                checkState(latestOutstandingTx.isReady(), "Previous transaction in chain must be ready.");
-                snapshot = latestOutstandingTx.getMutatedView();
-            } else {
-                snapshot = dataTree.takeSnapshot();
+            final DataTreeSnapshot snapshot = getSnapshot();
+            return recordTransaction(new SnapshotBackedWriteTransaction(nextIdentifier(),
+                    getDebugTransactions(), snapshot, this));
+        }
+
+        @Override
+        protected synchronized void transactionAborted(final SnapshotBackedWriteTransaction tx) {
+            if (tx.equals(allocatedTransaction)) {
+                Preconditions.checkState(readySnapshot == null, "Unexpected abort of transaction %s with ready snapshot %s", tx, readySnapshot);
+                allocatedTransaction = null;
             }
-            final SnapshotBackedWriteTransaction ret = new SnapshotBackedWriteTransaction(nextIdentifier(),
-                    getDebugTransactions(), snapshot, this);
-            latestOutstandingTx = ret;
-            return ret;
         }
 
         @Override
-        public DOMStoreThreePhaseCommitCohort ready(final SnapshotBackedWriteTransaction tx) {
-            DOMStoreThreePhaseCommitCohort storeCohort = InMemoryDOMDataStore.this.ready(tx);
-            return new ChainedTransactionCommitImpl(tx, storeCohort, this);
+        protected synchronized DOMStoreThreePhaseCommitCohort transactionReady(final SnapshotBackedWriteTransaction tx, final DataTreeModification tree) {
+            Preconditions.checkState(tx.equals(allocatedTransaction), "Mis-ordered ready transaction %s last allocated was %s", tx, allocatedTransaction);
+            if (readySnapshot != null) {
+                // The snapshot should have been cleared
+                LOG.warn("Uncleared snapshot {} encountered, overwritten with transaction {} snapshot {}", readySnapshot, tx, tree);
+            }
+
+            final DOMStoreThreePhaseCommitCohort cohort = InMemoryDOMDataStore.this.transactionReady(tx, tree);
+            readySnapshot = tree;
+            return new ChainedTransactionCommitImpl(tx, cohort, this);
         }
 
         @Override
         public void close() {
-
             // FIXME: this call doesn't look right here - listeningExecutor is shared and owned
             // by the outer class.
             //listeningExecutor.shutdownNow();
@@ -297,31 +306,30 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
         protected synchronized void onTransactionFailed(final SnapshotBackedWriteTransaction transaction,
                 final Throwable t) {
             chainFailed = true;
-
         }
 
         public synchronized void onTransactionCommited(final SnapshotBackedWriteTransaction transaction) {
-            // If committed transaction is latestOutstandingTx we clear
-            // latestOutstandingTx
-            // field in order to base new transactions on Datastore Data Tree
-            // directly.
-            if (transaction.equals(latestOutstandingTx)) {
-                latestOutstandingTx = null;
+            // If the committed transaction was the one we allocated last,
+            // we clear it and the ready snapshot, so the next transaction
+            // allocated refers to the data tree directly.
+            if (transaction.equals(allocatedTransaction)) {
+                if (readySnapshot == null) {
+                    LOG.warn("Transaction {} committed while no ready snapshot present", transaction);
+                }
+
+                allocatedTransaction = null;
+                readySnapshot = null;
             }
         }
-
     }
 
     private static class ChainedTransactionCommitImpl implements DOMStoreThreePhaseCommitCohort {
-
         private final SnapshotBackedWriteTransaction transaction;
         private final DOMStoreThreePhaseCommitCohort delegate;
-
         private final DOMStoreTransactionChainImpl txChain;
 
         protected ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction,
                 final DOMStoreThreePhaseCommitCohort delegate, final DOMStoreTransactionChainImpl txChain) {
-            super();
             this.transaction = transaction;
             this.delegate = delegate;
             this.txChain = txChain;
@@ -355,29 +363,26 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
                 public void onSuccess(final Void result) {
                     txChain.onTransactionCommited(transaction);
                 }
-
             });
             return commitFuture;
         }
-
     }
 
     private class ThreePhaseCommitImpl implements DOMStoreThreePhaseCommitCohort {
-
         private final SnapshotBackedWriteTransaction transaction;
         private final DataTreeModification modification;
 
         private ResolveDataChangeEventsTask listenerResolver;
         private DataTreeCandidate candidate;
 
-        public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction) {
+        public ThreePhaseCommitImpl(final SnapshotBackedWriteTransaction writeTransaction, final DataTreeModification modification) {
             this.transaction = writeTransaction;
-            this.modification = transaction.getMutatedView();
+            this.modification = modification;
         }
 
         @Override
         public ListenableFuture<Boolean> canCommit() {
-            return listeningExecutor.submit(new Callable<Boolean>() {
+            return commitExecutor.submit(new Callable<Boolean>() {
                 @Override
                 public Boolean call() throws TransactionCommitFailedException {
                     try {
@@ -401,7 +406,7 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
 
         @Override
         public ListenableFuture<Void> preCommit() {
-            return listeningExecutor.submit(new Callable<Void>() {
+            return commitExecutor.submit(new Callable<Void>() {
                 @Override
                 public Void call() {
                     candidate = dataTree.prepare(modification);
@@ -425,7 +430,7 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
              * The commit has to occur atomically with regard to listener
              * registrations.
              */
-            synchronized (this) {
+            synchronized (InMemoryDOMDataStore.this) {
                 dataTree.commit(candidate);
                 listenerResolver.resolve(dataChangeListenerNotificationManager);
             }
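
A hedged sketch of the ordering the reworked chain enforces: a chained write transaction must be ready() before the next transaction is allocated, and that next transaction is then based on its predecessor's ready snapshot. ChainUsageSketch and writeThenRead are hypothetical; the DOMStore* calls are the SPI types used above.

import org.opendaylight.controller.sal.core.spi.data.DOMStore;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

// Hypothetical driver; the store, path and data are supplied by the caller.
final class ChainUsageSketch {
    static void writeThenRead(final DOMStore store, final YangInstanceIdentifier path,
            final NormalizedNode<?, ?> data) throws Exception {
        final DOMStoreTransactionChain chain = store.createTransactionChain();

        final DOMStoreWriteTransaction w = chain.newWriteOnlyTransaction();
        w.write(path, data);
        // ready() records the snapshot that the next chained transaction will see.
        final DOMStoreThreePhaseCommitCohort cohort = w.ready();

        // Allowed only after ready(): this transaction is based on w's ready
        // snapshot, so it observes the write before it is committed to the tree.
        final DOMStoreReadTransaction r = chain.newReadOnlyTransaction();
        r.read(path).get();
        r.close();

        // Normally driven by the commit coordinator; shown inline for brevity.
        cohort.canCommit().get();
        cohort.preCommit().get();
        cohort.commit().get();

        chain.close();
    }
}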
index dc1482c6abaefb7880c7f6b55cc37c4d6ad65e3f..2ee8e182c255fef59d8b219fa565473e2e8f362a 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.md.sal.dom.store.impl;
 
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
 import java.util.concurrent.ExecutorService;
 import javax.annotation.Nullable;
 import org.opendaylight.controller.sal.core.api.model.SchemaService;
@@ -57,7 +59,7 @@ public final class InMemoryDOMDataStoreFactory {
             @Nullable final InMemoryDOMDataStoreConfigProperties properties) {
 
         InMemoryDOMDataStoreConfigProperties actualProperties = properties;
-        if(actualProperties == null) {
+        if (actualProperties == null) {
             actualProperties = InMemoryDOMDataStoreConfigProperties.getDefault();
         }
 
@@ -65,21 +67,18 @@ public final class InMemoryDOMDataStoreFactory {
         // task execution time to get higher throughput as DataChangeListeners typically provide
         // much of the business logic for a data model. If the executor queue size limit is reached,
         // subsequent submitted notifications will block the calling thread.
-
         int dclExecutorMaxQueueSize = actualProperties.getMaxDataChangeExecutorQueueSize();
         int dclExecutorMaxPoolSize = actualProperties.getMaxDataChangeExecutorPoolSize();
 
         ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
                 dclExecutorMaxPoolSize, dclExecutorMaxQueueSize, name + "-DCL" );
 
-        ExecutorService domStoreExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
-                actualProperties.getMaxDataStoreExecutorQueueSize(), "DOMStore-" + name );
-
-        InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name,
-                domStoreExecutor, dataChangeListenerExecutor,
+        final ListeningExecutorService commitExecutor = MoreExecutors.sameThreadExecutor();
+        final InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name,
+            commitExecutor, dataChangeListenerExecutor,
                 actualProperties.getMaxDataChangeListenerQueueSize(), debugTransactions);
 
-        if(schemaService != null) {
+        if (schemaService != null) {
             schemaService.registerSchemaContextListener(dataStore);
         }
 
index 5fe9866b12ed0ec6e2a669de4cc7bd3b9957b641..25ddbf5df25dc065ba11bcc64cdf059cfbacb14c 100644 (file)
@@ -11,10 +11,8 @@ import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Multimap;
-
 import java.util.Collection;
 import java.util.Map.Entry;
-
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
 import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.Builder;
 import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.SimpleEventFactory;
@@ -120,7 +118,9 @@ final class ResolveDataChangeEventsTask {
             Preconditions.checkArgument(node.getDataAfter().isPresent(),
                     "Modification at {} has type {} but no after-data", state.getPath(), node.getModificationType());
             if (!node.getDataBefore().isPresent()) {
-                resolveCreateEvent(state, node.getDataAfter().get());
+                @SuppressWarnings({ "unchecked", "rawtypes" })
+                final NormalizedNode<PathArgument, ?> afterNode = (NormalizedNode)node.getDataAfter().get();
+                resolveSameEventRecursivelly(state, afterNode, DOMImmutableDataChangeEvent.getCreateEventFactory());
                 return true;
             }
 
@@ -128,7 +128,10 @@ final class ResolveDataChangeEventsTask {
         case DELETE:
             Preconditions.checkArgument(node.getDataBefore().isPresent(),
                     "Modification at {} has type {} but no before-data", state.getPath(), node.getModificationType());
-            resolveDeleteEvent(state, node.getDataBefore().get());
+
+            @SuppressWarnings({ "unchecked", "rawtypes" })
+            final NormalizedNode<PathArgument, ?> beforeNode = (NormalizedNode)node.getDataBefore().get();
+            resolveSameEventRecursivelly(state, beforeNode, DOMImmutableDataChangeEvent.getRemoveEventFactory());
             return true;
         case UNMODIFIED:
             return false;
@@ -223,26 +226,6 @@ final class ResolveDataChangeEventsTask {
         return true;
     }
 
-    /**
-     * Resolves create events deep down the interest listener tree.
-     *
-     * @param path
-     * @param listeners
-     * @param afterState
-     * @return
-     */
-    private void resolveCreateEvent(final ResolveDataChangeState state, final NormalizedNode<?, ?> afterState) {
-        @SuppressWarnings({ "unchecked", "rawtypes" })
-        final NormalizedNode<PathArgument, ?> node = (NormalizedNode) afterState;
-        resolveSameEventRecursivelly(state, node, DOMImmutableDataChangeEvent.getCreateEventFactory());
-    }
-
-    private void resolveDeleteEvent(final ResolveDataChangeState state, final NormalizedNode<?, ?> beforeState) {
-        @SuppressWarnings({ "unchecked", "rawtypes" })
-        final NormalizedNode<PathArgument, ?> node = (NormalizedNode) beforeState;
-        resolveSameEventRecursivelly(state, node, DOMImmutableDataChangeEvent.getRemoveEventFactory());
-    }
-
     private void resolveSameEventRecursivelly(final ResolveDataChangeState state,
             final NormalizedNode<PathArgument, ?> node, final SimpleEventFactory eventFactory) {
         if (!state.needsProcessing()) {
@@ -277,6 +260,11 @@ final class ResolveDataChangeEventsTask {
         Preconditions.checkArgument(modification.getDataBefore().isPresent(), "Subtree change with before-data not present at path %s", state.getPath());
         Preconditions.checkArgument(modification.getDataAfter().isPresent(), "Subtree change with after-data not present at path %s", state.getPath());
 
+        if (!state.needsProcessing()) {
+            LOG.trace("Not processing modified subtree {}", state.getPath());
+            return true;
+        }
+
         DataChangeScope scope = null;
         for (DataTreeCandidateNode childMod : modification.getChildNodes()) {
             final ResolveDataChangeState childState = state.child(childMod.getIdentifier());
index d3c5a7cb70e8013734f64619b456e74ed18ee0e2..3db4115af67908bed3c4edfcfbb2d91cdae3baae 100644 (file)
@@ -47,16 +47,16 @@ final class ResolveDataChangeState {
     /**
      * Inherited from immediate parent
      */
-    private final Iterable<Builder> inheritedOne;
+    private final Collection<Builder> inheritedOne;
     private final YangInstanceIdentifier nodeId;
     private final Collection<Node> nodes;
 
-    private final Map<DataChangeListenerRegistration<?>, Builder> subBuilders = new HashMap<>();
-    private final Map<DataChangeListenerRegistration<?>, Builder> oneBuilders = new HashMap<>();
-    private final Map<DataChangeListenerRegistration<?>, Builder> baseBuilders = new HashMap<>();
+    private final Map<DataChangeListenerRegistration<?>, Builder> subBuilders;
+    private final Map<DataChangeListenerRegistration<?>, Builder> oneBuilders;
+    private final Map<DataChangeListenerRegistration<?>, Builder> baseBuilders;
 
     private ResolveDataChangeState(final YangInstanceIdentifier nodeId,
-            final Iterable<Builder> inheritedSub, final Iterable<Builder> inheritedOne,
+            final Iterable<Builder> inheritedSub, final Collection<Builder> inheritedOne,
             final Collection<Node> nodes) {
         this.nodeId = Preconditions.checkNotNull(nodeId);
         this.nodes = Preconditions.checkNotNull(nodes);
@@ -66,22 +66,36 @@ final class ResolveDataChangeState {
         /*
          * Collect the nodes which need to be propagated from us to the child.
          */
+        final Map<DataChangeListenerRegistration<?>, Builder> sub = new HashMap<>();
+        final Map<DataChangeListenerRegistration<?>, Builder> one = new HashMap<>();
+        final Map<DataChangeListenerRegistration<?>, Builder> base = new HashMap<>();
         for (Node n : nodes) {
             for (DataChangeListenerRegistration<?> l : n.getListeners()) {
                 final Builder b = DOMImmutableDataChangeEvent.builder(DataChangeScope.BASE);
                 switch (l.getScope()) {
                 case BASE:
-                    baseBuilders.put(l, b);
+                    base.put(l, b);
                     break;
                 case ONE:
-                    oneBuilders.put(l, b);
+                    one.put(l, b);
                     break;
                 case SUBTREE:
-                    subBuilders.put(l, b);
+                    sub.put(l, b);
                     break;
                 }
             }
         }
+
+        baseBuilders = maybeEmpty(base);
+        oneBuilders = maybeEmpty(one);
+        subBuilders = maybeEmpty(sub);
+    }
+
+    private static <K, V> Map<K, V> maybeEmpty(final Map<K, V> map) {
+        if (map.isEmpty()) {
+            return Collections.emptyMap();
+        }
+        return map;
     }
 
     /**
@@ -103,8 +117,38 @@ final class ResolveDataChangeState {
      * @return State handle
      */
     public ResolveDataChangeState child(final PathArgument childId) {
-        return new ResolveDataChangeState(nodeId.node(childId),
-            Iterables.concat(inheritedSub, subBuilders.values()),
+        /*
+         * We instantiate a concatenation only when needed:
+         *
+         * 1) If our collection is empty, we reuse the parent's. This is typically the case
+         *    for intermediate nodes, which should be the vast majority.
+         * 2) If the parent's iterable is a Collection and it is empty, reuse our collection.
+         *    This is the case for the first node which defines a subtree listener in a
+         *    particular subtree.
+         * 3) Concatenate the two collections. This happens when we already have some
+         *    subtree listeners and we encounter a node which adds a few more.
+         *
+         * This allows us to lower the number of objects allocated and also
+         * speeds up Iterables.isEmpty() in needsProcessing().
+         *
+         * Note that the check for Collection in 2) relies on precisely this logic, which
+         * ensures that we simply cannot see an empty concatenation, but rather start off with
+         * an empty collection, then switch to a non-empty collection and finally switch to
+         * a concatenation. This saves us from instantiating iterators, which a trivial
+         * Iterables.isEmpty() would do as soon as we cross case 3).
+         */
+        final Iterable<Builder> sb;
+        if (!subBuilders.isEmpty()) {
+            if (inheritedSub instanceof Collection && ((Collection<?>) inheritedSub).isEmpty()) {
+                sb = subBuilders.values();
+            } else {
+                sb = Iterables.concat(inheritedSub, subBuilders.values());
+            }
+        } else {
+            sb = inheritedSub;
+        }
+
+        return new ResolveDataChangeState(nodeId.node(childId), sb,
             oneBuilders.values(), getListenerChildrenWildcarded(nodes, childId));
     }
 
@@ -127,16 +171,29 @@ final class ResolveDataChangeState {
         if (!nodes.isEmpty()) {
             return true;
         }
-        // Have SUBTREE listeners
-        if (!Iterables.isEmpty(inheritedSub)) {
-            return true;
-        }
         // Have ONE listeners
-        if (!Iterables.isEmpty(inheritedOne)) {
+        if (!inheritedOne.isEmpty()) {
             return true;
         }
 
-        return false;
+        /*
+         * Have SUBTREE listeners
+         *
+         * This is a slightly magical replacement for !Iterables.isEmpty(inheritedSub).
+         * It relies on the logic in child(), which gives us the guarantee that when
+         * inheritedSub is not a Collection, it is guaranteed to be non-empty (which
+         * means we need to process). If it is a collection, we still need to check
+         * it for emptiness.
+         *
+         * Unlike Iterables.isEmpty() this code does not instantiate any temporary
+         * objects and is thus more efficient.
+         */
+        if (inheritedSub instanceof Collection) {
+            return !((Collection<?>) inheritedSub).isEmpty();
+        }
+
+        // Non-Collection => non-empty => have to process
+        return true;
     }
 
     /**
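
The reuse-or-concatenate logic in child() and the instanceof-based emptiness check in needsProcessing() above work as a pair. Below is a minimal standalone sketch of that idiom on plain strings instead of event builders; the class and method names are illustrative, not project code.

import com.google.common.collect.Iterables;

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;

final class InheritedBuildersSketch {

    // Mirrors child(): allocate a concatenation only when both sides contribute elements.
    static Iterable<String> propagate(final Iterable<String> inherited, final Collection<String> local) {
        if (local.isEmpty()) {
            // Case 1: nothing added at this node, reuse the parent's view as-is.
            return inherited;
        }
        if (inherited instanceof Collection && ((Collection<?>) inherited).isEmpty()) {
            // Case 2: first node to contribute, reuse the local collection.
            return local;
        }
        // Case 3: genuine concatenation of inherited and local contributions.
        return Iterables.concat(inherited, local);
    }

    // Mirrors needsProcessing(): a non-Collection iterable can only be a non-empty concatenation.
    static boolean needsProcessing(final Iterable<String> inherited) {
        if (inherited instanceof Collection) {
            return !((Collection<?>) inherited).isEmpty();
        }
        return true;
    }

    public static void main(final String[] args) {
        final Iterable<String> root = Collections.emptyList();
        final Iterable<String> child = propagate(root, Arrays.asList("subtree-listener"));
        final Iterable<String> grandChild = propagate(child, Collections.<String>emptyList());

        System.out.println(needsProcessing(root));        // false: nothing inherited
        System.out.println(needsProcessing(grandChild));  // true: listener inherited from an ancestor
    }
}
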
index 2ae7425bbb9de443d4cdae61dc90dbc94ba61234..30fa6da58bf99bd47f92758500adb8c317733d57 100644 (file)
@@ -8,16 +8,13 @@
 package org.opendaylight.controller.md.sal.dom.store.impl;
 
 import static com.google.common.base.Preconditions.checkNotNull;
-
 import com.google.common.base.Optional;
 import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.Futures;
-
 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeModification;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -27,9 +24,7 @@ import org.slf4j.LoggerFactory;
  * and executed according to {@link TransactionReadyPrototype}.
  *
  */
-class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction
-                                         implements DOMStoreReadWriteTransaction {
-
+final class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction implements DOMStoreReadWriteTransaction {
     private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadWriteTransaction.class);
 
     /**
@@ -49,16 +44,18 @@ class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction
         LOG.debug("Tx: {} Read: {}", getIdentifier(), path);
         checkNotNull(path, "Path must not be null.");
 
-        DataTreeModification dataView = getMutatedView();
-        if(dataView == null) {
-            return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed"));
-        }
-
+        final Optional<NormalizedNode<?, ?>> result;
         try {
-            return Futures.immediateCheckedFuture(dataView.readNode(path));
+            result = readSnapshotNode(path);
         } catch (Exception e) {
             LOG.error("Tx: {} Failed Read of {}", getIdentifier(), path, e);
-            return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed",e));
+            return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed", e));
+        }
+
+        if (result == null) {
+            return Futures.immediateFailedCheckedFuture(new ReadFailedException("Transaction is closed"));
+        } else {
+            return Futures.immediateCheckedFuture(result);
         }
     }
 
index 6129df74787b2fdb8b2da30d9cc2c78f04b288bc..60a23403b3ac7fce67b01b85295752cdbb2a2e6e 100644 (file)
@@ -8,11 +8,11 @@
 package org.opendaylight.controller.md.sal.dom.store.impl;
 
 import static com.google.common.base.Preconditions.checkState;
-
 import com.google.common.base.Objects.ToStringHelper;
+import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
-
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -29,11 +29,16 @@ import org.slf4j.LoggerFactory;
  *
  */
 class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction implements DOMStoreWriteTransaction {
-
     private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedWriteTransaction.class);
-    private DataTreeModification mutableTree;
-    private boolean ready = false;
-    private TransactionReadyPrototype readyImpl;
+    private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, TransactionReadyPrototype> READY_UPDATER =
+            AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, TransactionReadyPrototype.class, "readyImpl");
+    private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, DataTreeModification> TREE_UPDATER =
+            AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, DataTreeModification.class, "mutableTree");
+
+    // non-null when not ready
+    private volatile TransactionReadyPrototype readyImpl;
+    // non-null when not committed/closed
+    private volatile DataTreeModification mutableTree;
 
     /**
      * Creates new write-only transaction.
@@ -48,27 +53,23 @@ class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction impleme
     public SnapshotBackedWriteTransaction(final Object identifier, final boolean debug,
             final DataTreeSnapshot snapshot, final TransactionReadyPrototype readyImpl) {
         super(identifier, debug);
-        mutableTree = snapshot.newModification();
         this.readyImpl = Preconditions.checkNotNull(readyImpl, "readyImpl must not be null.");
+        mutableTree = snapshot.newModification();
         LOG.debug("Write Tx: {} allocated with snapshot {}", identifier, snapshot);
     }
 
-    @Override
-    public void close() {
-        LOG.debug("Store transaction: {} : Closed", getIdentifier());
-        this.mutableTree = null;
-        this.readyImpl = null;
-    }
-
     @Override
     public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
         checkNotReady();
+
+        final DataTreeModification tree = mutableTree;
+        LOG.debug("Tx: {} Write: {}:{}", getIdentifier(), path, data);
+
         try {
-            LOG.debug("Tx: {} Write: {}:{}", getIdentifier(), path, data);
-            mutableTree.write(path, data);
+            tree.write(path, data);
             // FIXME: Add checked exception
         } catch (Exception e) {
-            LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, mutableTree, e);
+            LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e);
             // Rethrow original ones if they are subclasses of RuntimeException
             // or Error
             Throwables.propagateIfPossible(e);
@@ -80,12 +81,15 @@ class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction impleme
     @Override
     public void merge(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
         checkNotReady();
+
+        final DataTreeModification tree = mutableTree;
+        LOG.debug("Tx: {} Merge: {}:{}", getIdentifier(), path, data);
+
         try {
-            LOG.debug("Tx: {} Merge: {}:{}", getIdentifier(), path, data);
-            mutableTree.merge(path, data);
+            tree.merge(path, data);
             // FIXME: Add checked exception
         } catch (Exception e) {
-            LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, mutableTree, e);
+            LOG.error("Tx: {}, failed to write {}:{} in {}", getIdentifier(), path, data, tree, e);
             // Rethrow original ones if they are subclasses of RuntimeException
             // or Error
             Throwables.propagateIfPossible(e);
@@ -97,12 +101,15 @@ class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction impleme
     @Override
     public void delete(final YangInstanceIdentifier path) {
         checkNotReady();
+
+        final DataTreeModification tree = mutableTree;
+        LOG.debug("Tx: {} Delete: {}", getIdentifier(), path);
+
         try {
-            LOG.debug("Tx: {} Delete: {}", getIdentifier(), path);
-            mutableTree.delete(path);
+            tree.delete(path);
             // FIXME: Add checked exception
         } catch (Exception e) {
-            LOG.error("Tx: {}, failed to delete {} in {}", getIdentifier(), path, mutableTree, e);
+            LOG.error("Tx: {}, failed to delete {} in {}", getIdentifier(), path, tree, e);
             // Rethrow original ones if they are subclasses of RuntimeException
             // or Error
             Throwables.propagateIfPossible(e);
@@ -111,30 +118,49 @@ class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction impleme
         }
     }
 
-    protected final boolean isReady() {
-        return ready;
+    /**
+     * Exposed for {@link SnapshotBackedReadWriteTransaction}'s sake only. The contract does
+     * not allow data access after the transaction has been closed or readied.
+     *
+     * @param path Path to read
+     * @return null if the transaction has been closed, otherwise the result of the snapshot read.
+     */
+    protected final Optional<NormalizedNode<?, ?>> readSnapshotNode(final YangInstanceIdentifier path) {
+        return readyImpl == null ? null : mutableTree.readNode(path);
     }
 
-    protected final void checkNotReady() {
-        checkState(!ready, "Transaction %s is ready. No further modifications allowed.", getIdentifier());
+    private final void checkNotReady() {
+        checkState(readyImpl != null, "Transaction %s is no longer open. No further modifications allowed.", getIdentifier());
     }
 
     @Override
-    public synchronized DOMStoreThreePhaseCommitCohort ready() {
-        checkState(!ready, "Transaction %s is already ready.", getIdentifier());
-        ready = true;
+    public DOMStoreThreePhaseCommitCohort ready() {
+        final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+        checkState(wasReady != null, "Transaction %s is no longer open", getIdentifier());
+
         LOG.debug("Store transaction: {} : Ready", getIdentifier());
-        mutableTree.ready();
-        return readyImpl.ready(this);
+
+        final DataTreeModification tree = mutableTree;
+        TREE_UPDATER.lazySet(this, null);
+        tree.ready();
+        return wasReady.transactionReady(this, tree);
     }
 
-    protected DataTreeModification getMutatedView() {
-        return mutableTree;
+    @Override
+    public void close() {
+        final TransactionReadyPrototype wasReady = READY_UPDATER.getAndSet(this, null);
+        if (wasReady != null) {
+            LOG.debug("Store transaction: {} : Closed", getIdentifier());
+            TREE_UPDATER.lazySet(this, null);
+            wasReady.transactionAborted(this);
+        } else {
+            LOG.debug("Store transaction: {} : Closed after submit", getIdentifier());
+        }
     }
 
     @Override
     protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
-        return toStringHelper.add("ready", isReady());
+        return toStringHelper.add("ready", readyImpl == null);
     }
 
     /**
@@ -146,7 +172,14 @@ class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction impleme
      * providing underlying logic for applying implementation.
      *
      */
-    public static interface TransactionReadyPrototype {
+    abstract static class TransactionReadyPrototype {
+        /**
+         * Called when a transaction is closed without being readied. This is not invoked for
+         * transactions which are ready.
+         *
+         * @param tx Transaction which got aborted.
+         */
+        protected abstract void transactionAborted(final SnapshotBackedWriteTransaction tx);
 
         /**
          * Returns a commit coordinator associated with supplied transactions.
@@ -155,8 +188,10 @@ class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction impleme
          *
          * @param tx
          *            Transaction on which ready was invoked.
+         * @param tree
+         *            Modified data tree which has been constructed.
          * @return DOMStoreThreePhaseCommitCohort associated with transaction
          */
-        DOMStoreThreePhaseCommitCohort ready(SnapshotBackedWriteTransaction tx);
+        protected abstract DOMStoreThreePhaseCommitCohort transactionReady(SnapshotBackedWriteTransaction tx, DataTreeModification tree);
     }
 }
\ No newline at end of file
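
The rework above replaces a synchronized ready flag with two volatile fields driven by AtomicReferenceFieldUpdater, so ready() and close() can race safely: whichever caller getAndSet()s readyImpl to null wins, and every later call sees the transaction as no longer open. A stripped-down sketch of that one-shot hand-off, with a hypothetical class in Java 7 style:

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

final class OneShotHandoffSketch {

    private static final AtomicReferenceFieldUpdater<OneShotHandoffSketch, Runnable> READY_UPDATER =
            AtomicReferenceFieldUpdater.newUpdater(OneShotHandoffSketch.class, Runnable.class, "onReady");

    // Non-null while the object is still open; nulled out exactly once.
    private volatile Runnable onReady;

    OneShotHandoffSketch(final Runnable onReady) {
        this.onReady = onReady;
    }

    // Mirrors ready()/close(): at most one caller observes the non-null callback.
    boolean readyOrClose() {
        final Runnable was = READY_UPDATER.getAndSet(this, null);
        if (was == null) {
            return false; // already readied or closed by another caller
        }
        was.run();
        return true;
    }

    public static void main(final String[] args) {
        final OneShotHandoffSketch tx = new OneShotHandoffSketch(new Runnable() {
            @Override
            public void run() {
                System.out.println("readied");
            }
        });
        System.out.println(tx.readyOrClose()); // true, after printing "readied"
        System.out.println(tx.readyOrClose()); // false, the state has already been consumed
    }
}
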
index b3608eceef13d7006c007a90e6c715323d693221..e00be2446a5e690b1053b6373974d4f767ef0740 100644 (file)
@@ -9,7 +9,7 @@
 package org.opendaylight.controller.md.sal.dom.store.impl.jmx;
 
 import java.util.concurrent.ExecutorService;
-
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
 import org.opendaylight.controller.md.sal.common.util.jmx.QueuedNotificationManagerMXBeanImpl;
 import org.opendaylight.controller.md.sal.common.util.jmx.ThreadExecutorStatsMXBeanImpl;
 import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
@@ -21,24 +21,28 @@ import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
  */
 public class InMemoryDataStoreStats implements AutoCloseable {
 
-    private final ThreadExecutorStatsMXBeanImpl notificationExecutorStatsBean;
-    private final ThreadExecutorStatsMXBeanImpl dataStoreExecutorStatsBean;
+    private final AbstractMXBean notificationExecutorStatsBean;
+    private final AbstractMXBean dataStoreExecutorStatsBean;
     private final QueuedNotificationManagerMXBeanImpl notificationManagerStatsBean;
 
-    public InMemoryDataStoreStats(String mBeanType, QueuedNotificationManager<?, ?> manager,
-            ExecutorService dataStoreExecutor) {
+    public InMemoryDataStoreStats(final String mBeanType, final QueuedNotificationManager<?, ?> manager,
+            final ExecutorService dataStoreExecutor) {
 
-        this.notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
+        notificationManagerStatsBean = new QueuedNotificationManagerMXBeanImpl(manager,
                 "notification-manager", mBeanType, null);
         notificationManagerStatsBean.registerMBean();
 
-        this.notificationExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(manager.getExecutor(),
+        notificationExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(manager.getExecutor(),
                 "notification-executor", mBeanType, null);
-        this.notificationExecutorStatsBean.registerMBean();
+        if (notificationExecutorStatsBean != null) {
+            notificationExecutorStatsBean.registerMBean();
+        }
 
-        this.dataStoreExecutorStatsBean = new ThreadExecutorStatsMXBeanImpl(dataStoreExecutor,
+        dataStoreExecutorStatsBean = ThreadExecutorStatsMXBeanImpl.create(dataStoreExecutor,
                 "data-store-executor", mBeanType, null);
-        this.dataStoreExecutorStatsBean.registerMBean();
+        if (dataStoreExecutorStatsBean != null) {
+            dataStoreExecutorStatsBean.registerMBean();
+        }
     }
 
     @Override
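
The switch to ThreadExecutorStatsMXBeanImpl.create() above means the stats bean may legitimately be null, for example when the executor cannot be instrumented, so registration is guarded. A minimal sketch of that null-tolerant pattern with a made-up bean interface; registerMBean()/unregisterMBean() are assumed names here:

final class NullTolerantStatsSketch {

    // Stand-in for the MXBean implementations used above.
    interface StatsBean {
        void registerMBean();
        void unregisterMBean();
    }

    private final StatsBean executorStatsBean;

    NullTolerantStatsSketch(final StatsBean maybeBean) {
        // The factory may have returned null, so keep the reference and register only if present.
        this.executorStatsBean = maybeBean;
        if (executorStatsBean != null) {
            executorStatsBean.registerMBean();
        }
    }

    void close() {
        // The same null check keeps shutdown symmetric with construction.
        if (executorStatsBean != null) {
            executorStatsBean.unregisterMBean();
        }
    }
}
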
index 0999efff0f6aed20bba510d2de207e316166c3df..09e178f5ceaa4e4709efa51dc9267a41f65b2d3f 100644 (file)
@@ -8,11 +8,10 @@ import com.google.common.base.Splitter;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Sets;
-
+import java.net.URI;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Set;
-
 import org.opendaylight.controller.netconf.client.NetconfClientSession;
 import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
 import org.opendaylight.yangtools.yang.common.QName;
@@ -119,6 +118,14 @@ public final class NetconfSessionCapabilities {
         return fromStrings(session.getServerCapabilities());
     }
 
+    private static QName cachedQName(final String namespace, final String revision, final String moduleName) {
+        return QName.cachedReference(QName.create(namespace, revision, moduleName));
+    }
+
+    private static QName cachedQName(final String namespace, final String moduleName) {
+        return QName.cachedReference(QName.create(URI.create(namespace), null, moduleName).withoutRevision());
+    }
+
     public static NetconfSessionCapabilities fromStrings(final Collection<String> capabilities) {
         final Set<QName> moduleBasedCaps = new HashSet<>();
         final Set<String> nonModuleCaps = Sets.newHashSet(capabilities);
@@ -138,8 +145,7 @@ public final class NetconfSessionCapabilities {
 
             String revision = REVISION_PARAM.from(queryParams);
             if (revision != null) {
-                moduleBasedCaps.add(QName.create(namespace, revision, moduleName));
-                nonModuleCaps.remove(capability);
+                addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, revision, moduleName));
                 continue;
             }
 
@@ -147,21 +153,29 @@ public final class NetconfSessionCapabilities {
              * We have seen devices which mis-escape revision, but the revision may not
              * even be there. First check if there is a substring that matches revision.
              */
-            if (!Iterables.any(queryParams, CONTAINS_REVISION)) {
+            if (Iterables.any(queryParams, CONTAINS_REVISION)) {
+
+                LOG.debug("Netconf device was not reporting revision correctly, trying to get amp;revision=");
+                revision = BROKEN_REVISON_PARAM.from(queryParams);
+                if (revision == null) {
+                    LOG.warn("Netconf device returned revision incorrectly escaped for {}, ignoring it", capability);
+                    addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, moduleName));
+                } else {
+                    addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, revision, moduleName));
+                }
                 continue;
             }
 
-            LOG.debug("Netconf device was not reporting revision correctly, trying to get amp;revision=");
-            revision = BROKEN_REVISON_PARAM.from(queryParams);
-            if (revision == null) {
-                LOG.warn("Netconf device returned revision incorrectly escaped for {}, ignoring it", capability);
-            }
-
-            // FIXME: do we really want to continue here?
-            moduleBasedCaps.add(QName.cachedReference(QName.create(namespace, revision, moduleName)));
-            nonModuleCaps.remove(capability);
+            // Fallback, no revision provided for module
+            addModuleQName(moduleBasedCaps, nonModuleCaps, capability, cachedQName(namespace, moduleName));
         }
 
         return new NetconfSessionCapabilities(ImmutableSet.copyOf(nonModuleCaps), ImmutableSet.copyOf(moduleBasedCaps));
     }
+
+
+    private static void addModuleQName(final Set<QName> moduleBasedCaps, final Set<String> nonModuleCaps, final String capability, final QName qName) {
+        moduleBasedCaps.add(qName);
+        nonModuleCaps.remove(capability);
+    }
 }
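
The refactored fromStrings() above funnels every module capability through addModuleQName() with three outcomes: a properly escaped revision, a recovered mis-escaped revision, or a revision-less QName as the final fallback. A rough standalone sketch of the query-string splitting that decision rests on, using plain string handling rather than the yangtools QName API:

import java.util.HashMap;
import java.util.Map;

final class CapabilityQuerySketch {

    // Splits the part after '?' into key/value pairs; an empty map means a non-module capability.
    static Map<String, String> queryParams(final String capability) {
        final Map<String, String> params = new HashMap<>();
        final int qm = capability.indexOf('?');
        if (qm < 0) {
            return params;
        }
        for (String kv : capability.substring(qm + 1).split("&")) {
            final int eq = kv.indexOf('=');
            if (eq > 0) {
                params.put(kv.substring(0, eq), kv.substring(eq + 1));
            }
        }
        return params;
    }

    public static void main(final String[] args) {
        // Revision present: a module-based capability with a proper revision.
        final Map<String, String> withRevision =
                queryParams("namespace:2?module=module2&revision=2012-12-12");
        System.out.println(withRevision.get("module") + " / " + withRevision.get("revision"));

        // Revision absent: the fallback path now keeps the module as a revision-less capability
        // instead of silently dropping it.
        final Map<String, String> withoutRevision = queryParams("namespace:2?module=module1");
        System.out.println(withoutRevision.get("module") + " / " + withoutRevision.get("revision"));
    }
}
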
index 87947b57faf5b28bf840411503caf6b6353133ca..80bb08f5af399fee497465e8ad1e345726784b24 100644 (file)
@@ -43,6 +43,19 @@ public class NetconfSessionCapabilitiesTest {
         assertThat(merged.getNonModuleCaps(), JUnitMatchers.hasItem("urn:ietf:params:netconf:capability:rollback-on-error:1.0"));
     }
 
+    @Test
+    public void testCapabilityNoRevision() throws Exception {
+        final List<String> caps1 = Lists.newArrayList(
+                "namespace:2?module=module2",
+                "namespace:2?module=module2&amp;revision=2012-12-12",
+                "namespace:2?module=module1&amp;RANDOMSTRING;revision=2013-12-12",
+                "namespace:2?module=module2&amp;RANDOMSTRING;revision=2013-12-12" // This one should be ignored (same as the first), since the revision is in the wrong format
+        );
+
+        final NetconfSessionCapabilities sessionCaps1 = NetconfSessionCapabilities.fromStrings(caps1);
+        assertCaps(sessionCaps1, 0, 3);
+    }
+
     private void assertCaps(final NetconfSessionCapabilities sessionCaps1, final int nonModuleCaps, final int moduleCaps) {
         assertEquals(nonModuleCaps, sessionCaps1.getNonModuleCaps().size());
         assertEquals(moduleCaps, sessionCaps1.getModuleBasedCaps().size());
index 8d454c4bd6e43ef54aa8c8a24f659e6be1244f68..0fb468be868be7dfa3081530b5068bdef13882cc 100644 (file)
      <groupId>com.typesafe.akka</groupId>
      <artifactId>akka-slf4j_${scala.version}</artifactId>
   </dependency>
+
+      <dependency>
+          <groupId>com.typesafe.akka</groupId>
+          <artifactId>akka-persistence-experimental_${scala.version}</artifactId>
+      </dependency>
     <!-- SAL Dependencies -->
 
     <dependency>
index 2be8ba47b99f9881304f58eeee3dede71497821c..c2e8125df2b5be843f2a6ed560ea3c211817c1b1 100644 (file)
@@ -1,5 +1,7 @@
 package org.opendaylight.controller.config.yang.config.remote_rpc_connector;
 
+import org.opendaylight.controller.cluster.common.actor.DefaultAkkaConfigurationReader;
+import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
 import org.opendaylight.controller.remote.rpc.RemoteRpcProviderFactory;
 import org.opendaylight.controller.sal.core.api.Broker;
 import org.osgi.framework.BundleContext;
@@ -22,7 +24,14 @@ public class RemoteRPCBrokerModule extends org.opendaylight.controller.config.ya
   @Override
   public java.lang.AutoCloseable createInstance() {
     Broker broker = getDomBrokerDependency();
-    return RemoteRpcProviderFactory.createInstance(broker, bundleContext);
+
+    RemoteRpcProviderConfig config = new RemoteRpcProviderConfig.Builder(getActorSystemName())
+                              .metricCaptureEnabled(getEnableMetricCapture())
+                              .mailboxCapacity(getBoundedMailboxCapacity())
+                              .withConfigReader(new DefaultAkkaConfigurationReader())
+                              .build();
+
+    return RemoteRpcProviderFactory.createInstance(broker, bundleContext, config);
   }
 
   public void setBundleContext(BundleContext bundleContext) {
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/AbstractUntypedActor.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/AbstractUntypedActor.java
deleted file mode 100644 (file)
index 66593ae..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.remote.rpc;
-
-import akka.actor.UntypedActor;
-import akka.event.Logging;
-import akka.event.LoggingAdapter;
-import org.opendaylight.controller.remote.rpc.messages.Monitor;
-
-public abstract class AbstractUntypedActor extends UntypedActor {
-    protected final LoggingAdapter LOG =
-        Logging.getLogger(getContext().system(), this);
-
-
-    public AbstractUntypedActor(){
-        LOG.debug("Actor created {}", getSelf());
-        getContext().
-            system().
-            actorSelection("user/termination-monitor").
-            tell(new Monitor(getSelf()), getSelf());
-    }
-
-    @Override public void onReceive(Object message) throws Exception {
-        LOG.debug("Received message {}", message);
-        handleReceive(message);
-        LOG.debug("Done handling message {}", message);
-    }
-
-    protected abstract void handleReceive(Object message) throws Exception;
-}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/ActorConstants.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/ActorConstants.java
deleted file mode 100644 (file)
index da0d628..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.remote.rpc;
-
-
-public class ActorConstants {
-  public static final String RPC_BROKER = "rpc-broker";
-  public static final String RPC_REGISTRY = "rpc-registry";
-  public static final String RPC_MANAGER = "rpc";
-
-  public static final String RPC_BROKER_PATH= "/user/rpc/rpc-broker";
-  public static final String RPC_REGISTRY_PATH = "/user/rpc/rpc-registry";
-  public static final String RPC_MANAGER_PATH = "/user/rpc";
-}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/ActorSystemFactory.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/ActorSystemFactory.java
deleted file mode 100644 (file)
index 6a442c5..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.remote.rpc;
-
-import akka.actor.ActorSystem;
-import akka.osgi.BundleDelegatingClassLoader;
-import org.opendaylight.controller.remote.rpc.utils.AkkaConfigurationReader;
-import org.osgi.framework.BundleContext;
-
-
-public class ActorSystemFactory {
-
-    public static final String ACTOR_SYSTEM_NAME = "opendaylight-cluster-rpc";
-    public static final String CONFIGURATION_NAME = "odl-cluster-rpc";
-
-    private static volatile ActorSystem actorSystem = null;
-
-  public static final ActorSystem getInstance(){
-     return actorSystem;
-  }
-
-  /**
-   * This method should be called only once during initialization
-   *
-   * @param bundleContext
-   */
-  public static final void createInstance(final BundleContext bundleContext, AkkaConfigurationReader akkaConfigurationReader) {
-    if(actorSystem == null) {
-      // Create an OSGi bundle classloader for actor system
-      BundleDelegatingClassLoader classLoader = new BundleDelegatingClassLoader(bundleContext.getBundle(),
-          Thread.currentThread().getContextClassLoader());
-      synchronized (ActorSystemFactory.class) {
-        // Double check
-        if (actorSystem == null) {
-          ActorSystem system = ActorSystem.create(ACTOR_SYSTEM_NAME,
-              akkaConfigurationReader.read().getConfig(CONFIGURATION_NAME), classLoader);
-          actorSystem = system;
-        }
-      }
-    } else {
-      throw new IllegalStateException("Actor system should be created only once. Use getInstance method to access existing actor system");
-    }
-  }
-
-}
index 7d7dbf0f3a58bc404882ad78186340d8eef2aba9..0f84abb22e8e6ca92d36e5486dac0f08ba699720 100644 (file)
@@ -1,42 +1,40 @@
 package org.opendaylight.controller.remote.rpc;
 
-import static akka.pattern.Patterns.ask;
 import akka.actor.ActorRef;
 import akka.dispatch.OnComplete;
-import akka.util.Timeout;
-
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
-
 import org.opendaylight.controller.remote.rpc.messages.InvokeRpc;
 import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
-import org.opendaylight.controller.remote.rpc.utils.ActorUtil;
-import org.opendaylight.controller.xml.codec.XmlUtils;
 import org.opendaylight.controller.sal.core.api.RoutedRpcDefaultImplementation;
 import org.opendaylight.controller.sal.core.api.RpcImplementation;
+import org.opendaylight.controller.xml.codec.XmlUtils;
 import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
-import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
 import org.opendaylight.yangtools.yang.data.api.CompositeNode;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-
 import scala.concurrent.ExecutionContext;
 
 import java.util.Collections;
 import java.util.Set;
 
+import static akka.pattern.Patterns.ask;
+
 public class RemoteRpcImplementation implements RpcImplementation, RoutedRpcDefaultImplementation {
     private static final Logger LOG = LoggerFactory.getLogger(RemoteRpcImplementation.class);
     private final ActorRef rpcBroker;
     private final SchemaContext schemaContext;
+    private final RemoteRpcProviderConfig config;
 
-    public RemoteRpcImplementation(ActorRef rpcBroker, SchemaContext schemaContext) {
+    public RemoteRpcImplementation(ActorRef rpcBroker, SchemaContext schemaContext, RemoteRpcProviderConfig config) {
         this.rpcBroker = rpcBroker;
         this.schemaContext = schemaContext;
+        this.config = config;
     }
 
     @Override
@@ -63,8 +61,7 @@ public class RemoteRpcImplementation implements RpcImplementation, RoutedRpcDefa
 
         final SettableFuture<RpcResult<CompositeNode>> listenableFuture = SettableFuture.create();
 
-        scala.concurrent.Future<Object> future = ask(rpcBroker, rpcMsg,
-                new Timeout(ActorUtil.ASK_DURATION));
+        scala.concurrent.Future<Object> future = ask(rpcBroker, rpcMsg, config.getAskDuration());
 
         OnComplete<Object> onComplete = new OnComplete<Object>() {
             @Override
index d088f2284d65904cc90d3aa3d6f39a4ca2cc99aa..8b4ce31d2ea0ee7e82352e72fd5249b37e6873a6 100644 (file)
@@ -31,21 +31,25 @@ public class RemoteRpcProvider implements AutoCloseable, Provider, SchemaContext
 
   private static final Logger LOG = LoggerFactory.getLogger(RemoteRpcProvider.class);
 
-  private final ActorSystem actorSystem;
   private final RpcProvisionRegistry rpcProvisionRegistry;
+
+  private ActorSystem actorSystem;
   private Broker.ProviderSession brokerSession;
   private SchemaContext schemaContext;
   private ActorRef rpcManager;
+  private RemoteRpcProviderConfig config;
 
 
   public RemoteRpcProvider(ActorSystem actorSystem, RpcProvisionRegistry rpcProvisionRegistry) {
     this.actorSystem = actorSystem;
     this.rpcProvisionRegistry = rpcProvisionRegistry;
+    this.config = new RemoteRpcProviderConfig(actorSystem.settings().config());
   }
 
   @Override
   public void close() throws Exception {
-    this.actorSystem.shutdown();
+    if (this.actorSystem != null)
+      this.actorSystem.shutdown();
   }
 
   @Override
@@ -60,17 +64,17 @@ public class RemoteRpcProvider implements AutoCloseable, Provider, SchemaContext
   }
 
   private void start() {
-    LOG.info("Starting all rpc listeners and actors.");
-    // Create actor to handle and sync routing table in cluster
+    LOG.info("Starting remote rpc service...");
+
     SchemaService schemaService = brokerSession.getService(SchemaService.class);
     schemaContext = schemaService.getGlobalContext();
 
-    rpcManager = actorSystem.actorOf(RpcManager.props(schemaContext, brokerSession, rpcProvisionRegistry), ActorConstants.RPC_MANAGER);
+    rpcManager = actorSystem.actorOf(RpcManager.props(schemaContext, brokerSession, rpcProvisionRegistry),
+                                     config.getRpcManagerName());
 
-    LOG.debug("Rpc actors are created.");
+    LOG.debug("rpc manager started");
   }
 
-
   @Override
   public void onGlobalContextUpdated(SchemaContext schemaContext) {
     this.schemaContext = schemaContext;
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderConfig.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderConfig.java
new file mode 100644 (file)
index 0000000..3f6d42d
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import akka.util.Timeout;
+import com.typesafe.config.Config;
+import org.opendaylight.controller.cluster.common.actor.CommonConfig;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Configuration facade for the remote RPC provider, backed by the actor system's
+ * Typesafe {@link Config}.
+ */
+public class RemoteRpcProviderConfig extends CommonConfig {
+
+    protected static final String TAG_RPC_BROKER_NAME = "rpc-broker-name";
+    protected static final String TAG_RPC_REGISTRY_NAME = "registry-name";
+    protected static final String TAG_RPC_MGR_NAME = "rpc-manager-name";
+    protected static final String TAG_RPC_BROKER_PATH = "rpc-broker-path";
+    protected static final String TAG_RPC_REGISTRY_PATH = "rpc-registry-path";
+    protected static final String TAG_RPC_MGR_PATH = "rpc-manager-path";
+    protected static final String TAG_ASK_DURATION = "ask-duration";
+    private static final String TAG_GOSSIP_TICK_INTERVAL = "gossip-tick-interval";
+
+    //locally cached values
+    private Timeout cachedAskDuration;
+    private FiniteDuration cachedGossipTickInterval;
+
+    public RemoteRpcProviderConfig(Config config){
+        super(config);
+    }
+
+    public String getRpcBrokerName(){
+        return get().getString(TAG_RPC_BROKER_NAME);
+    }
+
+    public String getRpcRegistryName(){
+        return get().getString(TAG_RPC_REGISTRY_NAME);
+    }
+
+    public String getRpcManagerName(){
+        return get().getString(TAG_RPC_MGR_NAME);
+    }
+
+    public String getRpcBrokerPath(){
+        return get().getString(TAG_RPC_BROKER_PATH);
+    }
+
+    public String getRpcRegistryPath(){
+        return get().getString(TAG_RPC_REGISTRY_PATH);
+
+    }
+
+    public String getRpcManagerPath(){
+        return get().getString(TAG_RPC_MGR_PATH);
+    }
+
+
+    public Timeout getAskDuration(){
+        if (cachedAskDuration != null){
+            return cachedAskDuration;
+        }
+
+        cachedAskDuration = new Timeout(new FiniteDuration(
+                get().getDuration(TAG_ASK_DURATION, TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS));
+
+        return cachedAskDuration;
+    }
+
+    public FiniteDuration getGossipTickInterval(){
+        if (cachedGossipTickInterval != null) {
+            return cachedGossipTickInterval;
+        }
+
+        cachedGossipTickInterval = new FiniteDuration(
+                get().getDuration(TAG_GOSSIP_TICK_INTERVAL, TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);
+
+        return cachedGossipTickInterval;
+    }
+
+    public static class Builder extends CommonConfig.Builder<Builder>{
+
+        public Builder(String actorSystemName){
+            super(actorSystemName);
+
+            //Actor names
+            configHolder.put(TAG_RPC_BROKER_NAME, "broker");
+            configHolder.put(TAG_RPC_REGISTRY_NAME, "registry");
+            configHolder.put(TAG_RPC_MGR_NAME, "rpc");
+
+            //Actor paths
+            configHolder.put(TAG_RPC_BROKER_PATH, "/user/rpc/broker");
+            configHolder.put(TAG_RPC_REGISTRY_PATH, "/user/rpc/registry");
+            configHolder.put(TAG_RPC_MGR_PATH, "/user/rpc");
+
+            //durations
+            configHolder.put(TAG_ASK_DURATION, "15s");
+            configHolder.put(TAG_GOSSIP_TICK_INTERVAL, "500ms");
+
+        }
+
+        public RemoteRpcProviderConfig build(){
+            return new RemoteRpcProviderConfig(merge());
+        }
+    }
+
+
+}
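
RemoteRpcProviderConfig above layers its Builder defaults under whatever the actor system's configuration supplies and caches the parsed durations. A small sketch of the underlying Typesafe Config pattern with assumed keys; the real class goes through CommonConfig rather than calling withFallback() directly:

import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

import java.util.concurrent.TimeUnit;

final class ConfigDefaultsSketch {

    private static final Config DEFAULTS = ConfigFactory.parseString(
            "ask-duration = 15s\n"
          + "gossip-tick-interval = 500ms\n");

    private final Config config;

    ConfigDefaultsSketch(final Config overrides) {
        // Caller-supplied settings win; anything missing falls back to the defaults above.
        this.config = overrides.withFallback(DEFAULTS);
    }

    long askDurationNanos() {
        return config.getDuration("ask-duration", TimeUnit.NANOSECONDS);
    }

    long gossipTickIntervalNanos() {
        return config.getDuration("gossip-tick-interval", TimeUnit.NANOSECONDS);
    }

    public static void main(final String[] args) {
        final ConfigDefaultsSketch cfg =
                new ConfigDefaultsSketch(ConfigFactory.parseString("ask-duration = 5s"));
        System.out.println(cfg.askDurationNanos());        // 5s, taken from the override
        System.out.println(cfg.gossipTickIntervalNanos()); // 500ms, taken from the defaults
    }
}
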
index 0e6b795c058877069640a848fe1144575db37443..c82a72eaa56c82e40388b8fb612eed7b198dbae8 100644 (file)
@@ -8,19 +8,44 @@
 
 package org.opendaylight.controller.remote.rpc;
 
-
-import org.opendaylight.controller.remote.rpc.utils.DefaultAkkaConfigurationReader;
+import akka.actor.ActorSystem;
+import akka.osgi.BundleDelegatingClassLoader;
+import com.typesafe.config.Config;
 import org.opendaylight.controller.sal.core.api.Broker;
 import org.opendaylight.controller.sal.core.api.RpcProvisionRegistry;
 import org.osgi.framework.BundleContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class RemoteRpcProviderFactory {
-    public static RemoteRpcProvider createInstance(final Broker broker, final BundleContext bundleContext){
+    private static final Logger LOG = LoggerFactory.getLogger(RemoteRpcProviderFactory.class);
+
+    public static RemoteRpcProvider createInstance(
+            final Broker broker, final BundleContext bundleContext, final RemoteRpcProviderConfig config){
 
-      ActorSystemFactory.createInstance(bundleContext, new DefaultAkkaConfigurationReader());
       RemoteRpcProvider rpcProvider =
-          new RemoteRpcProvider(ActorSystemFactory.getInstance(), (RpcProvisionRegistry) broker);
+          new RemoteRpcProvider(createActorSystem(bundleContext, config), (RpcProvisionRegistry) broker);
+
       broker.registerProvider(rpcProvider);
       return rpcProvider;
     }
+
+    private static ActorSystem createActorSystem(BundleContext bundleContext, RemoteRpcProviderConfig config){
+
+        // Create an OSGi bundle classloader for actor system
+        BundleDelegatingClassLoader classLoader =
+                new BundleDelegatingClassLoader(bundleContext.getBundle(),
+                        Thread.currentThread().getContextClassLoader());
+
+        Config actorSystemConfig = config.get();
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Actor system configuration\n{}", actorSystemConfig.root().render());
+        }
+        if (config.isMetricCaptureEnabled()) {
+            LOG.info("Instrumentation is enabled in actor system {}. Metrics can be viewed in JMX console.",
+                    config.getActorSystemName());
+        }
+
+        return ActorSystem.create(config.getActorSystemName(), actorSystemConfig, classLoader);
+    }
 }
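
createActorSystem() above moves actor-system construction out of the removed ActorSystemFactory singleton and into the factory, passing an explicit configuration and classloader. A bare-bones sketch of that call shape outside OSGi; the system name comes from the old factory, while loading the default configuration and the shutdown call are assumptions in the Akka 2.3-era API:

import akka.actor.ActorSystem;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;

final class ActorSystemBootstrapSketch {

    public static void main(final String[] args) {
        // Outside OSGi there is no BundleDelegatingClassLoader; the context classloader suffices.
        final ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
        final Config actorSystemConfig = ConfigFactory.load();

        final ActorSystem system =
                ActorSystem.create("opendaylight-cluster-rpc", actorSystemConfig, classLoader);
        System.out.println("Started " + system.name());

        system.shutdown(); // Akka 2.3-style termination
    }
}
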
index 98cf6a329f655f3caeebb39b989a1b761ca9a5cd..2aaac5a78ed531fc830bfca7540d2603a2e0f41b 100644 (file)
@@ -53,7 +53,9 @@ public class RoutedRpcListener implements RouteChangeListener<RpcRoutingContext,
    * @param announcements
    */
   private void announce(Set<RpcRouter.RouteIdentifier<?, ?, ?>> announcements) {
-    LOG.debug("Announcing [{}]", announcements);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Announcing [{}]", announcements);
+    }
     RpcRegistry.Messages.AddOrUpdateRoutes addRpcMsg = new RpcRegistry.Messages.AddOrUpdateRoutes(new ArrayList<>(announcements));
     rpcRegistry.tell(addRpcMsg, ActorRef.noSender());
   }
@@ -63,7 +65,9 @@ public class RoutedRpcListener implements RouteChangeListener<RpcRoutingContext,
    * @param removals
    */
   private void remove(Set<RpcRouter.RouteIdentifier<?, ?, ?>> removals){
-    LOG.debug("Removing [{}]", removals);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Removing [{}]", removals);
+    }
     RpcRegistry.Messages.RemoveRoutes removeRpcMsg = new RpcRegistry.Messages.RemoveRoutes(new ArrayList<>(removals));
     rpcRegistry.tell(removeRpcMsg, ActorRef.noSender());
   }
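
One note on the isDebugEnabled() guards added above: SLF4J's {} placeholders already defer formatting until the level check passes, so the guard mainly pays off when a log argument is itself expensive to compute, for instance rendering a collection into a pre-built string. A short illustration of both cases, with hypothetical names:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Arrays;
import java.util.Set;

final class GuardedLoggingSketch {

    private static final Logger LOG = LoggerFactory.getLogger(GuardedLoggingSketch.class);

    static void announce(final Set<String> announcements) {
        // Cheap case: the argument is just a reference; {} defers toString() until
        // debug is actually enabled, so no guard is strictly required.
        LOG.debug("Announcing [{}]", announcements);

        // Expensive case: the argument is computed eagerly, so the guard avoids
        // building the string at all when debug is off.
        if (LOG.isDebugEnabled()) {
            LOG.debug("Announcing routes: {}", Arrays.toString(announcements.toArray()));
        }
    }
}
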
index 2aca655d2628eb9d89295d09419d0cd44f7491d7..2046e419d9f2602b444becf6986fe2c73bb9756e 100644 (file)
@@ -8,25 +8,26 @@
 
 package org.opendaylight.controller.remote.rpc;
 
-import static akka.pattern.Patterns.ask;
 import akka.actor.ActorRef;
 import akka.actor.Props;
 import akka.dispatch.OnComplete;
 import akka.japi.Creator;
 import akka.japi.Pair;
-import akka.util.Timeout;
-
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.JdkFutureAdapters;
+import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 import org.opendaylight.controller.remote.rpc.messages.ExecuteRpc;
 import org.opendaylight.controller.remote.rpc.messages.InvokeRpc;
 import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
-import org.opendaylight.controller.remote.rpc.utils.LatestEntryRoutingLogic;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
-import org.opendaylight.controller.remote.rpc.utils.ActorUtil;
+import org.opendaylight.controller.remote.rpc.utils.LatestEntryRoutingLogic;
 import org.opendaylight.controller.remote.rpc.utils.RoutingLogic;
-import org.opendaylight.controller.xml.codec.XmlUtils;
 import org.opendaylight.controller.sal.connector.api.RpcRouter;
 import org.opendaylight.controller.sal.core.api.Broker;
 import org.opendaylight.controller.sal.core.api.Broker.ProviderSession;
+import org.opendaylight.controller.xml.codec.XmlUtils;
 import org.opendaylight.yangtools.yang.common.RpcError;
 import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
 import org.opendaylight.yangtools.yang.common.RpcResult;
@@ -36,16 +37,13 @@ import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.JdkFutureAdapters;
-import com.google.common.util.concurrent.ListenableFuture;
-
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.Future;
 
+import static akka.pattern.Patterns.ask;
+
 /**
  * Actor to initiate execution of remote RPC on other nodes of the cluster.
  */
@@ -56,12 +54,14 @@ public class RpcBroker extends AbstractUntypedActor {
     private final Broker.ProviderSession brokerSession;
     private final ActorRef rpcRegistry;
     private final SchemaContext schemaContext;
+    private final RemoteRpcProviderConfig config;
 
     private RpcBroker(Broker.ProviderSession brokerSession, ActorRef rpcRegistry,
             SchemaContext schemaContext) {
         this.brokerSession = brokerSession;
         this.rpcRegistry = rpcRegistry;
         this.schemaContext = schemaContext;
+        config = new RemoteRpcProviderConfig(getContext().system().settings().config());
     }
 
     public static Props props(Broker.ProviderSession brokerSession, ActorRef rpcRegistry,
@@ -79,14 +79,14 @@ public class RpcBroker extends AbstractUntypedActor {
     }
 
     private void invokeRemoteRpc(final InvokeRpc msg) {
-        LOG.debug("Looking up the remote actor for rpc {}", msg.getRpc());
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Looking up the remote actor for rpc {}", msg.getRpc());
+        }
         RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(
                 null, msg.getRpc(), msg.getIdentifier());
         RpcRegistry.Messages.FindRouters findMsg = new RpcRegistry.Messages.FindRouters(routeId);
 
-        scala.concurrent.Future<Object> future = ask(rpcRegistry, findMsg,
-                new Timeout(ActorUtil.LOCAL_ASK_DURATION));
+        scala.concurrent.Future<Object> future = ask(rpcRegistry, findMsg, config.getAskDuration());
 
         final ActorRef sender = getSender();
         final ActorRef self = self();
@@ -129,8 +129,7 @@ public class RpcBroker extends AbstractUntypedActor {
         ExecuteRpc executeMsg = new ExecuteRpc(XmlUtils.inputCompositeNodeToXml(msg.getInput(),
                 schemaContext), msg.getRpc());
 
-        scala.concurrent.Future<Object> future = ask(logic.select(), executeMsg,
-                new Timeout(ActorUtil.REMOTE_ASK_DURATION));
+        scala.concurrent.Future<Object> future = ask(logic.select(), executeMsg, config.getAskDuration());
 
         OnComplete<Object> onComplete = new OnComplete<Object>() {
             @Override
@@ -149,8 +148,9 @@ public class RpcBroker extends AbstractUntypedActor {
     }
 
     private void executeRpc(final ExecuteRpc msg) {
-        LOG.debug("Executing rpc {}", msg.getRpc());
-
+        if(LOG.isDebugEnabled()) {
+            LOG.debug("Executing rpc {}", msg.getRpc());
+        }
         Future<RpcResult<CompositeNode>> future = brokerSession.rpc(msg.getRpc(),
                 XmlUtils.inputXmlToCompositeNode(msg.getRpc(), msg.getInputCompositeNode(),
                         schemaContext));
index dee98521ae9f2d56893d591b30bc30a8489d89ef..22879dda2f903f6008c5a7c21a2a6447acca12bf 100644 (file)
@@ -31,7 +31,9 @@ public class RpcListener implements RpcRegistrationListener{
 
   @Override
   public void onRpcImplementationAdded(QName rpc) {
-    LOG.debug("Adding registration for [{}]", rpc);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Adding registration for [{}]", rpc);
+    }
     RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(null, rpc, null);
     List<RpcRouter.RouteIdentifier<?,?,?>> routeIds = new ArrayList<>();
     routeIds.add(routeId);
@@ -41,7 +43,9 @@ public class RpcListener implements RpcRegistrationListener{
 
   @Override
   public void onRpcImplementationRemoved(QName rpc) {
-    LOG.debug("Removing registration for [{}]", rpc);
+    if(LOG.isDebugEnabled()) {
+        LOG.debug("Removing registration for [{}]", rpc);
+    }
     RpcRouter.RouteIdentifier<?,?,?> routeId = new RouteIdentifierImpl(null, rpc, null);
     List<RpcRouter.RouteIdentifier<?,?,?>> routeIds = new ArrayList<>();
     routeIds.add(routeId);
index d4da226b9dc4278cd508e83082283a3163c6615c..4ae9c2e4d097b36d71a9a3207e14ee35fcff3c83 100644 (file)
@@ -15,11 +15,9 @@ import akka.actor.Props;
 import akka.actor.SupervisorStrategy;
 import akka.japi.Creator;
 import akka.japi.Function;
-import com.typesafe.config.Config;
-import com.typesafe.config.ConfigFactory;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
 import org.opendaylight.controller.remote.rpc.messages.UpdateSchemaContext;
 import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
-import org.opendaylight.controller.remote.rpc.utils.ActorUtil;
 import org.opendaylight.controller.sal.core.api.Broker;
 import org.opendaylight.controller.sal.core.api.RpcProvisionRegistry;
 import org.opendaylight.yangtools.yang.common.QName;
@@ -27,6 +25,7 @@ import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.duration.Duration;
+
 import java.util.Set;
 
 /**
@@ -43,16 +42,19 @@ public class RpcManager extends AbstractUntypedActor {
   private ActorRef rpcBroker;
   private ActorRef rpcRegistry;
   private final Broker.ProviderSession brokerSession;
+  private final RemoteRpcProviderConfig config;
   private RpcListener rpcListener;
   private RoutedRpcListener routeChangeListener;
   private RemoteRpcImplementation rpcImplementation;
   private final RpcProvisionRegistry rpcProvisionRegistry;
 
   private RpcManager(SchemaContext schemaContext,
-                     Broker.ProviderSession brokerSession, RpcProvisionRegistry rpcProvisionRegistry) {
+                     Broker.ProviderSession brokerSession,
+                     RpcProvisionRegistry rpcProvisionRegistry) {
     this.schemaContext = schemaContext;
     this.brokerSession = brokerSession;
     this.rpcProvisionRegistry = rpcProvisionRegistry;
+    this.config = new RemoteRpcProviderConfig(getContext().system().settings().config());
 
     createRpcActors();
     startListeners();
@@ -60,7 +62,8 @@ public class RpcManager extends AbstractUntypedActor {
 
 
   public static Props props(final SchemaContext schemaContext,
-                            final Broker.ProviderSession brokerSession, final RpcProvisionRegistry rpcProvisionRegistry) {
+                            final Broker.ProviderSession brokerSession,
+                            final RpcProvisionRegistry rpcProvisionRegistry) {
     return Props.create(new Creator<RpcManager>() {
       @Override
       public RpcManager create() throws Exception {
@@ -72,15 +75,13 @@ public class RpcManager extends AbstractUntypedActor {
   private void createRpcActors() {
     LOG.debug("Create rpc registry and broker actors");
 
-      Config conf = ConfigFactory.load();
-
     rpcRegistry =
             getContext().actorOf(Props.create(RpcRegistry.class).
-                withMailbox(ActorUtil.MAILBOX), ActorConstants.RPC_REGISTRY);
+                withMailbox(config.getMailBoxName()), config.getRpcRegistryName());
 
     rpcBroker =
             getContext().actorOf(RpcBroker.props(brokerSession, rpcRegistry, schemaContext).
-                withMailbox(ActorUtil.MAILBOX),ActorConstants.RPC_BROKER);
+                withMailbox(config.getMailBoxName()), config.getRpcBrokerName());
 
     RpcRegistry.Messages.SetLocalRouter localRouter = new RpcRegistry.Messages.SetLocalRouter(rpcBroker);
     rpcRegistry.tell(localRouter, self());
@@ -91,7 +92,7 @@ public class RpcManager extends AbstractUntypedActor {
 
     rpcListener = new RpcListener(rpcRegistry);
     routeChangeListener = new RoutedRpcListener(rpcRegistry);
-    rpcImplementation = new RemoteRpcImplementation(rpcBroker, schemaContext);
+    rpcImplementation = new RemoteRpcImplementation(rpcBroker, schemaContext, config);
 
     brokerSession.addRpcRegistrationListener(rpcListener);
     rpcProvisionRegistry.registerRouteChangeListener(routeChangeListener);
index a90f1e1ed251204d6d3e4a45b6516bb783dbb41a..48ccd824d41cf6b1456007012d6801d028443fce 100644 (file)
@@ -12,7 +12,7 @@ import akka.actor.Terminated;
 import akka.actor.UntypedActor;
 import akka.event.Logging;
 import akka.event.LoggingAdapter;
-import org.opendaylight.controller.remote.rpc.messages.Monitor;
+import org.opendaylight.controller.cluster.common.actor.Monitor;
 
 public class TerminationMonitor extends UntypedActor{
     protected final LoggingAdapter LOG =
@@ -25,7 +25,9 @@ public class TerminationMonitor extends UntypedActor{
     @Override public void onReceive(Object message) throws Exception {
         if(message instanceof Terminated){
             Terminated terminated = (Terminated) message;
-            LOG.debug("Actor terminated : {}", terminated.actor());
+            if(LOG.isDebugEnabled()) {
+                LOG.debug("Actor terminated : {}", terminated.actor());
+            }
         }else if(message instanceof Monitor){
           Monitor monitor = (Monitor) message;
           getContext().watch(monitor.getActorRef());
index 5109d316446b13158e3739824e653e0259929135..095d70926b90d3838a777cc14a6976b31ccd9c97 100644 (file)
@@ -10,7 +10,6 @@ package org.opendaylight.controller.remote.rpc.registry;
 import akka.actor.ActorRef;
 import akka.actor.Address;
 import akka.actor.Props;
-import akka.actor.UntypedActor;
 import akka.dispatch.Mapper;
 import akka.event.Logging;
 import akka.event.LoggingAdapter;
@@ -18,9 +17,10 @@ import akka.japi.Option;
 import akka.japi.Pair;
 import akka.pattern.Patterns;
 import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
+import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
 import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
 import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore;
-import org.opendaylight.controller.remote.rpc.utils.ActorUtil;
 import org.opendaylight.controller.sal.connector.api.RpcRouter;
 import scala.concurrent.Future;
 
@@ -30,9 +30,9 @@ import java.util.List;
 import java.util.Map;
 
 import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.AddOrUpdateRoutes;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRouters;
 import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.RemoveRoutes;
 import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.SetLocalRouter;
-import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRouters;
 import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBuckets;
 import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBucketsReply;
 import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucket;
@@ -45,7 +45,7 @@ import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.Bu
  * It uses {@link org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore} to maintain this
  * cluster wide information.
  */
-public class RpcRegistry extends UntypedActor {
+public class RpcRegistry extends AbstractUntypedActorWithMetering {
 
     final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
 
@@ -59,9 +59,11 @@ public class RpcRegistry extends UntypedActor {
      */
     private ActorRef localRouter;
 
+    private RemoteRpcProviderConfig config;
+
     public RpcRegistry() {
         bucketStore = getContext().actorOf(Props.create(BucketStore.class), "store");
-
+        this.config = new RemoteRpcProviderConfig(getContext().system().settings().config());
         log.info("Bucket store path = {}", bucketStore.path().toString());
     }
 
@@ -69,11 +71,9 @@ public class RpcRegistry extends UntypedActor {
         this.bucketStore = bucketStore;
     }
 
-    @Override
-    public void onReceive(Object message) throws Exception {
-
-        log.debug("Received message: message [{}]", message);
 
+    @Override
+    protected void handleReceive(Object message) throws Exception {
         //TODO: if sender is remote, reject message
 
         if (message instanceof SetLocalRouter)
@@ -108,7 +108,7 @@ public class RpcRegistry extends UntypedActor {
 
         Preconditions.checkState(localRouter != null, "Router must be set first");
 
-        Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), ActorUtil.ASK_DURATION.toMillis());
+        Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), config.getAskDuration());
         futureReply.map(getMapperToAddRoutes(msg.getRouteIdentifiers()), getContext().dispatcher());
     }
 
@@ -117,7 +117,7 @@ public class RpcRegistry extends UntypedActor {
      */
     private void receiveRemoveRoutes(RemoveRoutes msg) {
 
-        Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), ActorUtil.ASK_DURATION.toMillis());
+        Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), config.getAskDuration());
         futureReply.map(getMapperToRemoveRoutes(msg.getRouteIdentifiers()), getContext().dispatcher());
 
     }
@@ -130,7 +130,7 @@ public class RpcRegistry extends UntypedActor {
     private void receiveGetRouter(FindRouters msg) {
         final ActorRef sender = getSender();
 
-        Future<Object> futureReply = Patterns.ask(bucketStore, new GetAllBuckets(), ActorUtil.ASK_DURATION.toMillis());
+        Future<Object> futureReply = Patterns.ask(bucketStore, new GetAllBuckets(), config.getAskDuration());
         futureReply.map(getMapperToGetRouter(msg.getRouteIdentifier(), sender), getContext().dispatcher());
     }
 
@@ -151,7 +151,8 @@ public class RpcRegistry extends UntypedActor {
      * @param routeId
      * @return
      */
-    private Messages.FindRoutersReply createReplyWithRouters(Map<Address, Bucket> buckets, RpcRouter.RouteIdentifier<?, ?, ?> routeId) {
+    private Messages.FindRoutersReply createReplyWithRouters(
+            Map<Address, Bucket> buckets, RpcRouter.RouteIdentifier<?, ?, ?> routeId) {
 
         List<Pair<ActorRef, Long>> routers = new ArrayList<>();
         Option<Pair<ActorRef, Long>> routerWithUpdateTime = null;
@@ -184,7 +185,8 @@ public class RpcRegistry extends UntypedActor {
      * @param sender  client who asked to find the routers.
      * @return
      */
-    private Mapper<Object, Void> getMapperToGetRouter(final RpcRouter.RouteIdentifier<?, ?, ?> routeId, final ActorRef sender) {
+    private Mapper<Object, Void> getMapperToGetRouter(
+            final RpcRouter.RouteIdentifier<?, ?, ?> routeId, final ActorRef sender) {
         return new Mapper<Object, Void>() {
             @Override
             public Void apply(Object replyMessage) {
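
A hedged sketch of the ask-and-map pattern these hunks settle on: the registry asks the bucket store with the configured ask duration and processes the reply on its own dispatcher. The fragment assumes it runs inside the actor, with the bucketStore and config fields shown above; the reply handling is illustrative.

    Future<Object> futureReply =
            Patterns.ask(bucketStore, new GetAllBuckets(), config.getAskDuration());

    futureReply.map(new Mapper<Object, Void>() {
        @Override
        public Void apply(Object reply) {
            if (reply instanceof GetAllBucketsReply) {
                // The real mapper turns the returned buckets into a FindRoutersReply.
            }
            return null;
        }
    }, getContext().dispatcher());
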
index 3cdd924e8574afeee7b889f91905bbedd9a9e7f0..4dac456dc426d3493c8988355f40a79ca1df97ab 100644 (file)
@@ -11,7 +11,7 @@ import java.io.Serializable;
 
 public class BucketImpl<T extends Copier<T>> implements Bucket<T>, Serializable {
 
-    private Long version = System.currentTimeMillis();;
+    private Long version = System.currentTimeMillis();
 
     private T data;
 
index ff51f4fcfa671ff4ae71be72c6403b741e3de8dd..b50dfb1ba3e196f66e33d74438d43a2aca2d81ee 100644 (file)
@@ -12,11 +12,11 @@ import akka.actor.ActorRef;
 import akka.actor.ActorRefProvider;
 import akka.actor.Address;
 import akka.actor.Props;
-import akka.actor.UntypedActor;
 import akka.cluster.ClusterActorRefProvider;
 import akka.event.Logging;
 import akka.event.LoggingAdapter;
-import org.opendaylight.controller.remote.rpc.utils.ActorUtil;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
+import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
 import org.opendaylight.controller.utils.ConditionalProbe;
 
 import java.util.HashMap;
@@ -45,14 +45,14 @@ import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.Bu
  * This store uses a {@link org.opendaylight.controller.remote.rpc.registry.gossip.Gossiper}.
  *
  */
-public class BucketStore extends UntypedActor {
+public class BucketStore extends AbstractUntypedActorWithMetering {
 
     final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
 
     /**
      * Bucket owned by the node
      */
-    private BucketImpl localBucket = new BucketImpl();;
+    private BucketImpl localBucket = new BucketImpl();
 
     /**
      * Buckets owned by other known nodes in the cluster
@@ -71,20 +71,24 @@ public class BucketStore extends UntypedActor {
 
     private ConditionalProbe probe;
 
+    private final RemoteRpcProviderConfig config;
+
+    public BucketStore(){
+        config = new RemoteRpcProviderConfig(getContext().system().settings().config());
+    }
+
     @Override
     public void preStart(){
         ActorRefProvider provider = getContext().provider();
         selfAddress = provider.getDefaultAddress();
 
         if ( provider instanceof ClusterActorRefProvider)
-            getContext().actorOf(Props.create(Gossiper.class).withMailbox(ActorUtil.MAILBOX), "gossiper");
+            getContext().actorOf(Props.create(Gossiper.class).withMailbox(config.getMailBoxName()), "gossiper");
     }
 
-    @Override
-    public void onReceive(Object message) throws Exception {
-
-        log.debug("Received message: node[{}], message[{}]", selfAddress, message);
 
+    @Override
+    protected void handleReceive(Object message) throws Exception {
         if (probe != null) {
             probe.tell(message, getSelf());
         }
@@ -100,17 +104,18 @@ public class BucketStore extends UntypedActor {
             receiveGetLocalBucket();
         } else if (message instanceof GetBucketsByMembers) {
             receiveGetBucketsByMembers(
-                ((GetBucketsByMembers) message).getMembers());
+                    ((GetBucketsByMembers) message).getMembers());
         } else if (message instanceof GetBucketVersions) {
             receiveGetBucketVersions();
         } else if (message instanceof UpdateRemoteBuckets) {
             receiveUpdateRemoteBuckets(
-                ((UpdateRemoteBuckets) message).getBuckets());
+                    ((UpdateRemoteBuckets) message).getBuckets());
         } else {
-            log.debug("Unhandled message [{}]", message);
+            if(log.isDebugEnabled()) {
+                log.debug("Unhandled message [{}]", message);
+            }
             unhandled(message);
         }
-
     }
 
     /**
@@ -233,8 +238,9 @@ public class BucketStore extends UntypedActor {
                 versions.put(entry.getKey(), remoteVersion);
             }
         }
-
-        log.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets);
+        if(log.isDebugEnabled()) {
+            log.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets);
+        }
     }
 
     ///
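
BucketStore (and Gossiper below) now obtain their RemoteRpcProviderConfig in the constructor from the actor system's settings, so configuration never has to travel through Props. A minimal sketch of that construction pattern, assuming the AbstractUntypedActorWithMetering base class introduced by this patch; the class name is illustrative.

    import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
    import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;

    public class ConfiguredStore extends AbstractUntypedActorWithMetering {
        private final RemoteRpcProviderConfig config;

        public ConfiguredStore() {
            // The actor system's backing Config carries the remote-rpc settings.
            config = new RemoteRpcProviderConfig(getContext().system().settings().config());
        }

        @Override
        protected void handleReceive(Object message) throws Exception {
            // Receive hook provided by the metering base class.
            unhandled(message);
        }
    }
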
index f6ce5e55f3ee63602fc92529e92d6e93d0ff9bb3..1bbcc69f5ed4d5fa6d7d8ea773823c97c9bb6e05 100644 (file)
@@ -12,7 +12,6 @@ import akka.actor.ActorRefProvider;
 import akka.actor.ActorSelection;
 import akka.actor.Address;
 import akka.actor.Cancellable;
-import akka.actor.UntypedActor;
 import akka.cluster.Cluster;
 import akka.cluster.ClusterActorRefProvider;
 import akka.cluster.ClusterEvent;
@@ -21,7 +20,8 @@ import akka.dispatch.Mapper;
 import akka.event.Logging;
 import akka.event.LoggingAdapter;
 import akka.pattern.Patterns;
-import org.opendaylight.controller.remote.rpc.utils.ActorUtil;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActorWithMetering;
+import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
 import scala.concurrent.Future;
 import scala.concurrent.duration.FiniteDuration;
 
@@ -59,7 +59,7 @@ import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.Go
  *
  */
 
-public class Gossiper extends UntypedActor {
+public class Gossiper extends AbstractUntypedActorWithMetering {
 
     final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
 
@@ -79,7 +79,11 @@ public class Gossiper extends UntypedActor {
 
     private Boolean autoStartGossipTicks = true;
 
-    public Gossiper(){}
+    private RemoteRpcProviderConfig config;
+
+    public Gossiper(){
+        config = new RemoteRpcProviderConfig(getContext().system().settings().config());
+    }
 
     /**
      * Helpful for testing
@@ -106,7 +110,7 @@ public class Gossiper extends UntypedActor {
         if (autoStartGossipTicks) {
             gossipTask = getContext().system().scheduler().schedule(
                     new FiniteDuration(1, TimeUnit.SECONDS),        //initial delay
-                    ActorUtil.GOSSIP_TICK_INTERVAL,                 //interval
+                    config.getGossipTickInterval(),                 //interval
                     getSelf(),                                       //target
                     new Messages.GossiperMessages.GossipTick(),      //message
                     getContext().dispatcher(),                       //execution context
@@ -124,22 +128,19 @@ public class Gossiper extends UntypedActor {
     }
 
     @Override
-    public void onReceive(Object message) throws Exception {
-
-        log.debug("Received message: node[{}], message[{}]", selfAddress, message);
-
+    protected void handleReceive(Object message) throws Exception {
         //Usually sent by self via the gossip task defined above, but it's not enforced.
         //These ticks can be sent by another actor as well, which is especially useful while testing.
         if (message instanceof GossipTick)
             receiveGossipTick();
 
-        //Message from remote gossiper with its bucket versions
+            //Message from remote gossiper with its bucket versions
         else if (message instanceof GossipStatus)
             receiveGossipStatus((GossipStatus) message);
 
-        //Message from remote gossiper with buckets. This is usually in response to GossipStatus message
-        //The contained buckets are newer as determined by the remote gossiper by comparing the GossipStatus
-        //message with its local versions
+            //Message from remote gossiper with buckets. This is usually in response to GossipStatus message
+            //The contained buckets are newer as determined by the remote gossiper by comparing the GossipStatus
+            //message with its local versions
         else if (message instanceof GossipEnvelope)
             receiveGossip((GossipEnvelope) message);
 
@@ -169,7 +170,9 @@ public class Gossiper extends UntypedActor {
         }
 
         clusterMembers.remove(member.address());
-        log.debug("Removed member [{}], Active member list [{}]", member.address(), clusterMembers);
+        if(log.isDebugEnabled()) {
+            log.debug("Removed member [{}], Active member list [{}]", member.address(), clusterMembers);
+        }
     }
 
     /**
@@ -183,8 +186,9 @@ public class Gossiper extends UntypedActor {
 
         if (!clusterMembers.contains(member.address()))
             clusterMembers.add(member.address());
-
-        log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers);
+        if(log.isDebugEnabled()) {
+            log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers);
+        }
     }
 
     /**
@@ -196,7 +200,7 @@ public class Gossiper extends UntypedActor {
     void receiveGossipTick(){
         if (clusterMembers.size() == 0) return; //no members to send gossip status to
 
-        Address remoteMemberToGossipTo = null;
+        Address remoteMemberToGossipTo;
 
         if (clusterMembers.size() == 1)
             remoteMemberToGossipTo = clusterMembers.get(0);
@@ -204,8 +208,9 @@ public class Gossiper extends UntypedActor {
             Integer randomIndex = ThreadLocalRandom.current().nextInt(0, clusterMembers.size());
             remoteMemberToGossipTo = clusterMembers.get(randomIndex);
         }
-
-        log.debug("Gossiping to [{}]", remoteMemberToGossipTo);
+        if(log.isDebugEnabled()) {
+            log.debug("Gossiping to [{}]", remoteMemberToGossipTo);
+        }
         getLocalStatusAndSendTo(remoteMemberToGossipTo);
     }
 
@@ -229,7 +234,7 @@ public class Gossiper extends UntypedActor {
 
         final ActorRef sender = getSender();
         Future<Object> futureReply =
-                Patterns.ask(getContext().parent(), new GetBucketVersions(), ActorUtil.ASK_DURATION.toMillis());
+                Patterns.ask(getContext().parent(), new GetBucketVersions(), config.getAskDuration());
 
         futureReply.map(getMapperToProcessRemoteStatus(sender, status), getContext().dispatcher());
 
@@ -243,7 +248,9 @@ public class Gossiper extends UntypedActor {
     void receiveGossip(GossipEnvelope envelope){
         //TODO: Add more validations
         if (!selfAddress.equals(envelope.to())) {
-            log.debug("Ignoring message intended for someone else. From [{}] to [{}]", envelope.from(), envelope.to());
+            if(log.isDebugEnabled()) {
+                log.debug("Ignoring message intended for someone else. From [{}] to [{}]", envelope.from(), envelope.to());
+            }
             return;
         }
 
@@ -271,7 +278,7 @@ public class Gossiper extends UntypedActor {
     void sendGossipTo(final ActorRef remote, final Set<Address> addresses){
 
         Future<Object> futureReply =
-                Patterns.ask(getContext().parent(), new GetBucketsByMembers(addresses), ActorUtil.ASK_DURATION.toMillis());
+                Patterns.ask(getContext().parent(), new GetBucketsByMembers(addresses), config.getAskDuration());
         futureReply.map(getMapperToSendGossip(remote), getContext().dispatcher());
     }
 
@@ -284,13 +291,15 @@ public class Gossiper extends UntypedActor {
 
         //Get local status from bucket store and send to remote
         Future<Object> futureReply =
-                Patterns.ask(getContext().parent(), new GetBucketVersions(), ActorUtil.ASK_DURATION.toMillis());
+                Patterns.ask(getContext().parent(), new GetBucketVersions(), config.getAskDuration());
 
         //Find gossiper on remote system
         ActorSelection remoteRef = getContext().system().actorSelection(
                 remoteActorSystemAddress.toString() + getSelf().path().toStringWithoutAddress());
 
-        log.debug("Sending bucket versions to [{}]", remoteRef);
+        if(log.isDebugEnabled()) {
+            log.debug("Sending bucket versions to [{}]", remoteRef);
+        }
 
         futureReply.map(getMapperToSendLocalStatus(remoteRef), getContext().dispatcher());
 
@@ -382,8 +391,6 @@ public class Gossiper extends UntypedActor {
                             localIsOlder.add(address);
                         else if (localVersions.get(address) > remoteVersions.get(address))
                             localIsNewer.add(address);
-                        else
-                            continue;
                     }
 
                     if (!localIsOlder.isEmpty())
@@ -417,7 +424,9 @@ public class Gossiper extends UntypedActor {
             public Void apply(Object msg) {
                 if (msg instanceof GetBucketsByMembersReply) {
                     Map<Address, Bucket> buckets = ((GetBucketsByMembersReply) msg).getBuckets();
-                    log.debug("Buckets to send from {}: {}", selfAddress, buckets);
+                    if(log.isDebugEnabled()) {
+                        log.debug("Buckets to send from {}: {}", selfAddress, buckets);
+                    }
                     GossipEnvelope envelope = new GossipEnvelope(selfAddress, sender.path().address(), buckets);
                     sender.tell(envelope, getSelf());
                 }

diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/utils/ActorUtil.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/utils/ActorUtil.java
deleted file mode 100644 (file)
index e2baffa..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- *
- */
-package org.opendaylight.controller.remote.rpc.utils;
-
-import scala.concurrent.duration.Duration;
-import scala.concurrent.duration.FiniteDuration;
-
-import java.util.concurrent.TimeUnit;
-
-public class ActorUtil {
-    public static final FiniteDuration LOCAL_ASK_DURATION = Duration.create(2, TimeUnit.SECONDS);
-    public static final FiniteDuration REMOTE_ASK_DURATION = Duration.create(15, TimeUnit.SECONDS);
-    public static final FiniteDuration ASK_DURATION = Duration.create(17, TimeUnit.SECONDS);
-    public static final FiniteDuration GOSSIP_TICK_INTERVAL = Duration.create(500, TimeUnit.MILLISECONDS);
-    public static final String MAILBOX = "bounded-mailbox";
-}
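
With ActorUtil deleted, its constants map onto RemoteRpcProviderConfig accessors that are already exercised elsewhere in this patch. A short sketch of the replacement; the builder name shown is the actor-system name used by the module.

    // Replacement mapping for the deleted constants (as used in the hunks above):
    //   ActorUtil.ASK_DURATION         -> config.getAskDuration()
    //   ActorUtil.GOSSIP_TICK_INTERVAL -> config.getGossipTickInterval()
    //   ActorUtil.MAILBOX              -> config.getMailBoxName()
    RemoteRpcProviderConfig config =
            new RemoteRpcProviderConfig.Builder("odl-cluster-rpc").build();
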
index 266832a0ab0491dfd5d58d2c0414c2733ef0300b..39ac9912746b35ec1f8633e1c0a439c5bb956e3d 100644 (file)
@@ -39,7 +39,7 @@ odl-cluster-data {
 
 odl-cluster-rpc {
   bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 1000
     mailbox-push-timeout-time = 100ms
   }
index 08db5c0043063bcfb17973f4279cdf31fb96d0fc..334d872c44845b42bd25662f9a143be4d54e0e4c 100644 (file)
@@ -5,11 +5,11 @@ module remote-rpc-connector {
 
     import config { prefix config; revision-date 2013-04-05; }
     import opendaylight-md-sal-dom {prefix dom;}
-    
+
     description
         "This module contains the base YANG definitions for
                  the remote routed rpc";
+
     revision "2014-07-07" {
         description
             "Initial revision";
@@ -25,7 +25,7 @@ module remote-rpc-connector {
     augment "/config:modules/config:module/config:configuration" {
         case remote-rpc-connector {
             when "/config:modules/config:module/config:type = 'remote-rpc-connector'";
-            
+
             container dom-broker {
                 uses config:service-ref {
                     refine type {
@@ -34,6 +34,24 @@ module remote-rpc-connector {
                     }
                 }
             }
+
+            leaf enable-metric-capture {
+                default false;
+                type boolean;
+                description "Enable or disable metric capture.";
+            }
+
+            leaf actor-system-name {
+                default odl-cluster-rpc;
+                type string;
+                description "Name by which the actor system is identified. It is also used to find the relevant configuration.";
+            }
+
+            leaf bounded-mailbox-capacity {
+                default 1000;
+                type uint16;
+                description "Max queue size that an actor's mailbox can reach";
+            }
         }
     }
 
index 8d886829aa4edceee15a2ac30aa3c358b02f2343..46406fd4feebad58c7546ec5df12e5bb1f2f8137 100644 (file)
@@ -8,17 +8,10 @@
 
 package org.opendaylight.controller.remote.rpc;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.net.URI;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.testkit.JavaTestKit;
+import com.google.common.collect.ImmutableList;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -26,9 +19,9 @@ import org.mockito.Mockito;
 import org.opendaylight.controller.sal.core.api.Broker;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.common.RpcError;
-import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
 import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.data.api.CompositeNode;
 import org.opendaylight.yangtools.yang.data.api.Node;
 import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
@@ -36,12 +29,16 @@ import org.opendaylight.yangtools.yang.data.impl.util.CompositeNodeBuilder;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
 
-import akka.actor.ActorRef;
-import akka.actor.ActorSystem;
-import akka.testkit.JavaTestKit;
+import java.io.File;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
 
-import com.google.common.collect.ImmutableList;
-import com.typesafe.config.ConfigFactory;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Base class for RPC tests.
@@ -70,8 +67,10 @@ public class AbstractRpcTest {
 
     @BeforeClass
     public static void setup() throws InterruptedException {
-        node1 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberA"));
-        node2 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberB"));
+        RemoteRpcProviderConfig config1 = new RemoteRpcProviderConfig.Builder("memberA").build();
+        RemoteRpcProviderConfig config2 = new RemoteRpcProviderConfig.Builder("memberB").build();
+        node1 = ActorSystem.create("opendaylight-rpc", config1.get());
+        node2 = ActorSystem.create("opendaylight-rpc", config2.get());
     }
 
     @AfterClass
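
A consolidated sketch of the test bootstrap these hunks introduce: each member's actor system is created from the Config produced by a per-member RemoteRpcProviderConfig builder. The teardown via JavaTestKit.shutdownActorSystem is an assumption about the surrounding test code and is not shown in this hunk.

    import akka.actor.ActorSystem;
    import akka.testkit.JavaTestKit;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;

    public class MemberSystemsTest {
        private static ActorSystem memberA;
        private static ActorSystem memberB;

        @BeforeClass
        public static void setup() {
            // Each builder name selects the matching section of the test application.conf.
            memberA = ActorSystem.create("opendaylight-rpc",
                    new RemoteRpcProviderConfig.Builder("memberA").build().get());
            memberB = ActorSystem.create("opendaylight-rpc",
                    new RemoteRpcProviderConfig.Builder("memberB").build().get());
        }

        @AfterClass
        public static void teardown() {
            JavaTestKit.shutdownActorSystem(memberA);
            JavaTestKit.shutdownActorSystem(memberB);
        }
    }
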
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/ActorSystemFactoryTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/ActorSystemFactoryTest.java
deleted file mode 100644 (file)
index cd1cd91..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
-package org.opendaylight.controller.remote.rpc;
-
-
-import akka.actor.ActorSystem;
-import com.typesafe.config.ConfigFactory;
-import junit.framework.Assert;
-import org.junit.After;
-import org.junit.Test;
-import org.opendaylight.controller.remote.rpc.utils.AkkaConfigurationReader;
-import org.osgi.framework.Bundle;
-import org.osgi.framework.BundleContext;
-
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class ActorSystemFactoryTest {
-  ActorSystem system = null;
-
-  @Test
-  public void testActorSystemCreation(){
-    BundleContext context = mock(BundleContext.class);
-    when(context.getBundle()).thenReturn(mock(Bundle.class));
-
-    AkkaConfigurationReader reader = mock(AkkaConfigurationReader.class);
-    when(reader.read()).thenReturn(ConfigFactory.load());
-
-    ActorSystemFactory.createInstance(context, reader);
-    system = ActorSystemFactory.getInstance();
-    Assert.assertNotNull(system);
-    // Check illegal state exception
-
-    try {
-      ActorSystemFactory.createInstance(context, reader);
-      fail("Illegal State exception should be thrown, while creating actor system second time");
-    } catch (IllegalStateException e) {
-    }
-  }
-
-  @After
-  public void cleanup() throws InterruptedException {
-    if(system != null) {
-      system.shutdown();
-    }
-  }
-}
index 6c3a57b3448e23ac485aa04518c788f2fad390ce..49451dd0db99114141c289750cf37f7b12e036c1 100644 (file)
@@ -8,27 +8,26 @@
 
 package org.opendaylight.controller.remote.rpc;
 
-import static org.junit.Assert.assertEquals;
-import java.net.URI;
-import java.util.Arrays;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
+import akka.testkit.JavaTestKit;
+import com.google.common.util.concurrent.ListenableFuture;
 import org.junit.Test;
 import org.opendaylight.controller.remote.rpc.messages.InvokeRpc;
 import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
 import org.opendaylight.controller.xml.codec.XmlUtils;
 import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 import org.opendaylight.yangtools.yang.common.RpcError.ErrorSeverity;
 import org.opendaylight.yangtools.yang.common.RpcError.ErrorType;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 import org.opendaylight.yangtools.yang.data.api.CompositeNode;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
-import akka.testkit.JavaTestKit;
+import java.net.URI;
+import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
 
-import com.google.common.util.concurrent.ListenableFuture;
+import static org.junit.Assert.assertEquals;
 
 /***
  * Unit tests for RemoteRpcImplementation.
@@ -42,7 +41,7 @@ public class RemoteRpcImplementationTest extends AbstractRpcTest {
         final AtomicReference<AssertionError> assertError = new AtomicReference<>();
         try {
             RemoteRpcImplementation rpcImpl = new RemoteRpcImplementation(
-                    probeReg1.getRef(), schemaContext);
+                    probeReg1.getRef(), schemaContext, getConfig());
 
             final CompositeNode input = makeRPCInput("foo");
             final CompositeNode output = makeRPCOutput("bar");
@@ -68,7 +67,7 @@ public class RemoteRpcImplementationTest extends AbstractRpcTest {
         final AtomicReference<AssertionError> assertError = new AtomicReference<>();
         try {
             RemoteRpcImplementation rpcImpl = new RemoteRpcImplementation(
-                    probeReg1.getRef(), schemaContext);
+                    probeReg1.getRef(), schemaContext, getConfig());
 
             QName instanceQName = new QName(new URI("ns"), "instance");
             YangInstanceIdentifier identifier = YangInstanceIdentifier.of(instanceQName);
@@ -99,7 +98,7 @@ public class RemoteRpcImplementationTest extends AbstractRpcTest {
         final AtomicReference<AssertionError> assertError = new AtomicReference<>();
         try {
             RemoteRpcImplementation rpcImpl = new RemoteRpcImplementation(
-                    probeReg1.getRef(), schemaContext);
+                    probeReg1.getRef(), schemaContext, getConfig());
 
             final CompositeNode input = makeRPCInput("foo");
 
@@ -125,7 +124,7 @@ public class RemoteRpcImplementationTest extends AbstractRpcTest {
         final AtomicReference<AssertionError> assertError = new AtomicReference<>();
         try {
             RemoteRpcImplementation rpcImpl = new RemoteRpcImplementation(
-                    probeReg1.getRef(), schemaContext);
+                    probeReg1.getRef(), schemaContext, getConfig());
 
             final CompositeNode input = makeRPCInput("foo");
 
@@ -182,4 +181,8 @@ public class RemoteRpcImplementationTest extends AbstractRpcTest {
 
         return invokeRpcMsg;
     }
+
+    private RemoteRpcProviderConfig getConfig(){
+        return new RemoteRpcProviderConfig.Builder("unit-test").build();
+    }
 }
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderConfigTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RemoteRpcProviderConfigTest.java
new file mode 100644 (file)
index 0000000..ae75252
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc;
+
+import akka.actor.ActorSystem;
+import akka.actor.Props;
+import akka.actor.UntypedActor;
+import akka.testkit.TestActorRef;
+import com.typesafe.config.Config;
+import com.typesafe.config.ConfigFactory;
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.common.actor.AkkaConfigurationReader;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.io.File;
+import java.util.concurrent.TimeUnit;
+
+public class RemoteRpcProviderConfigTest {
+
+    @Test
+    public void testConfigDefaults() {
+
+        Config c = ConfigFactory.parseFile(new File("application.conf"));
+        RemoteRpcProviderConfig config = new RemoteRpcProviderConfig.Builder("unit-test").build();
+
+        //Assert on configurations from common config
+        Assert.assertFalse(config.isMetricCaptureEnabled()); //should be disabled by default
+        Assert.assertNotNull(config.getMailBoxCapacity());
+        Assert.assertNotNull(config.getMailBoxName());
+        Assert.assertNotNull(config.getMailBoxPushTimeout());
+
+        //rest of the configurations should be set
+        Assert.assertNotNull(config.getActorSystemName());
+        Assert.assertNotNull(config.getRpcBrokerName());
+        Assert.assertNotNull(config.getRpcBrokerPath());
+        Assert.assertNotNull(config.getRpcManagerName());
+        Assert.assertNotNull(config.getRpcManagerPath());
+        Assert.assertNotNull(config.getRpcRegistryName());
+        Assert.assertNotNull(config.getRpcRegistryPath());
+        Assert.assertNotNull(config.getAskDuration());
+        Assert.assertNotNull(config.getGossipTickInterval());
+
+
+
+    }
+
+    @Test
+    public void testConfigCustomizations() {
+
+        AkkaConfigurationReader reader = new TestConfigReader();
+
+        final int expectedCapacity = 100;
+        String timeOutVal = "10ms";
+        FiniteDuration expectedTimeout = FiniteDuration.create(10, TimeUnit.MILLISECONDS);
+
+        RemoteRpcProviderConfig config = new RemoteRpcProviderConfig.Builder("unit-test")
+                .metricCaptureEnabled(true)//enable metric capture
+                .mailboxCapacity(expectedCapacity)
+                .mailboxPushTimeout(timeOutVal)
+                .withConfigReader(reader)
+                .build();
+
+        Assert.assertTrue(config.isMetricCaptureEnabled());
+        Assert.assertEquals(expectedCapacity, config.getMailBoxCapacity().intValue());
+        Assert.assertEquals(expectedTimeout.toMillis(), config.getMailBoxPushTimeout().toMillis());
+
+        //Now check this config inside an actor
+        ActorSystem system = ActorSystem.create("unit-test", config.get());
+        TestActorRef<ConfigTestActor> configTestActorTestActorRef =
+                TestActorRef.create(system, Props.create(ConfigTestActor.class));
+
+        ConfigTestActor actor = configTestActorTestActorRef.underlyingActor();
+        Config actorConfig = actor.getConfig();
+
+        config = new RemoteRpcProviderConfig(actorConfig);
+
+        Assert.assertTrue(config.isMetricCaptureEnabled());
+        Assert.assertEquals(expectedCapacity, config.getMailBoxCapacity().intValue());
+        Assert.assertEquals(expectedTimeout.toMillis(), config.getMailBoxPushTimeout().toMillis());
+    }
+
+    public static class ConfigTestActor extends UntypedActor {
+
+        private Config actorSystemConfig;
+
+        public ConfigTestActor() {
+            this.actorSystemConfig = getContext().system().settings().config();
+        }
+
+        @Override
+        public void onReceive(Object message) throws Exception {
+        }
+
+        /**
+         * Only for testing. NEVER expose actor's internal state like this.
+         *
+         * @return
+         */
+        public Config getConfig() {
+            return actorSystemConfig;
+        }
+    }
+
+    public static class TestConfigReader implements AkkaConfigurationReader {
+
+        @Override
+        public Config read() {
+            return ConfigFactory.parseResources("application.conf");
+
+        }
+    }
+}
\ No newline at end of file
index 8a7e4a039846205846e1b54b21981f78af843783..8b4599ca8ceac01684376b63be3dded36e354e2b 100644 (file)
@@ -13,7 +13,7 @@ package org.opendaylight.controller.remote.rpc;
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.testkit.JavaTestKit;
-import com.typesafe.config.ConfigFactory;
+import com.typesafe.config.Config;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -33,11 +33,14 @@ import static org.mockito.Mockito.when;
 public class RemoteRpcProviderTest {
 
   static ActorSystem system;
-
+  static RemoteRpcProviderConfig moduleConfig;
 
   @BeforeClass
   public static void setup() throws InterruptedException {
-    system = ActorSystem.create("odl-cluster-rpc", ConfigFactory.load().getConfig("odl-cluster-rpc"));
+    moduleConfig = new RemoteRpcProviderConfig.Builder("odl-cluster-rpc").build();
+    Config config = moduleConfig.get();
+    system = ActorSystem.create("odl-cluster-rpc", config);
+
   }
 
   @AfterClass
@@ -53,9 +56,14 @@ public class RemoteRpcProviderTest {
     SchemaService schemaService = mock(SchemaService.class);
     when(schemaService.getGlobalContext()). thenReturn(mock(SchemaContext.class));
     when(session.getService(SchemaService.class)).thenReturn(schemaService);
+
     rpcProvider.onSessionInitiated(session);
-    ActorRef actorRef = Await.result(system.actorSelection(ActorConstants.RPC_MANAGER_PATH).resolveOne(Duration.create(1, TimeUnit.SECONDS)),
-        Duration.create(2, TimeUnit.SECONDS));
-    Assert.assertTrue(actorRef.path().toString().contains(ActorConstants.RPC_MANAGER_PATH));
+
+    ActorRef actorRef = Await.result(
+            system.actorSelection(
+                    moduleConfig.getRpcManagerPath()).resolveOne(Duration.create(1, TimeUnit.SECONDS)),
+                                                                 Duration.create(2, TimeUnit.SECONDS));
+
+    Assert.assertTrue(actorRef.path().toString().contains(moduleConfig.getRpcManagerPath()));
   }
 }
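
The same assertion, restated as a sketch: the test resolves the RPC manager through the path reported by the config instead of a hard-coded constant. Timeouts mirror the hunk; the surrounding imports (ActorRef, ActorSystem, Assert, Await, Duration, TimeUnit) are the ones already present in the test above.

    RemoteRpcProviderConfig moduleConfig =
            new RemoteRpcProviderConfig.Builder("odl-cluster-rpc").build();
    ActorSystem system = ActorSystem.create("odl-cluster-rpc", moduleConfig.get());

    // Resolve the actor registered under the configured RPC manager path (illustrative timeouts).
    ActorRef rpcManager = Await.result(
            system.actorSelection(moduleConfig.getRpcManagerPath())
                    .resolveOne(Duration.create(1, TimeUnit.SECONDS)),
            Duration.create(2, TimeUnit.SECONDS));
    Assert.assertTrue(rpcManager.path().toString().contains(moduleConfig.getRpcManagerPath()));
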
index 83f52930b2b07ac32c75f83f437f1048132b54df..f6f720eed0f62dd50393c3275c6e7d9cc8fab23d 100644 (file)
@@ -9,12 +9,12 @@ import akka.actor.ChildActorPath;
 import akka.actor.Props;
 import akka.testkit.JavaTestKit;
 import com.google.common.base.Predicate;
-import com.typesafe.config.ConfigFactory;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.opendaylight.controller.remote.rpc.RemoteRpcProviderConfig;
 import org.opendaylight.controller.remote.rpc.RouteIdentifierImpl;
 import org.opendaylight.controller.remote.rpc.registry.gossip.Messages;
 import org.opendaylight.controller.sal.connector.api.RpcRouter;
@@ -45,9 +45,12 @@ public class RpcRegistryTest {
 
   @BeforeClass
   public static void setup() throws InterruptedException {
-    node1 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberA"));
-    node2 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberB"));
-    node3 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberC"));
+    RemoteRpcProviderConfig config1 = new RemoteRpcProviderConfig.Builder("memberA").build();
+    RemoteRpcProviderConfig config2 = new RemoteRpcProviderConfig.Builder("memberB").build();
+    RemoteRpcProviderConfig config3 = new RemoteRpcProviderConfig.Builder("memberC").build();
+    node1 = ActorSystem.create("opendaylight-rpc", config1.get());
+    node2 = ActorSystem.create("opendaylight-rpc", config2.get());
+    node3 = ActorSystem.create("opendaylight-rpc", config3.get());
   }
 
   @AfterClass
@@ -204,7 +207,10 @@ public class RpcRegistryTest {
         new ConditionalProbe(probe.getRef(), new Predicate() {
           @Override
           public boolean apply(@Nullable Object input) {
-            return clazz.equals(input.getClass());
+              if (input != null)
+                return clazz.equals(input.getClass());
+              else
+                  return false;
           }
         });
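
The probe predicate is now null-safe. The same check reads a little tighter as a single guarded expression; a sketch only, where clazz refers to the enclosing test variable and the Guava Predicate is parameterized for clarity.

    Predicate<Object> matchesExpectedClass = new Predicate<Object>() {
        @Override
        public boolean apply(@Nullable Object input) {
            // Reject null outright, otherwise match on the expected message class.
            return input != null && clazz.equals(input.getClass());
        }
    };
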
 
index 5c4af8d3da457c99344c0a26942c9d3456694f75..8e310815faed92617d626d14a5433bd61664a00b 100644 (file)
@@ -1,6 +1,6 @@
 odl-cluster-rpc{
   bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 1000
     mailbox-push-timeout-time = 10ms
   }
@@ -37,11 +37,12 @@ odl-cluster-rpc{
 }
 unit-test{
   akka {
-    loglevel = "INFO"
+    loglevel = "DEBUG"
     #loggers = ["akka.event.slf4j.Slf4jLogger"]
   }
   bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
+    #mailbox-capacity is specified in config subsystem
     mailbox-capacity = 1000
     mailbox-push-timeout-time = 10ms
   }
@@ -49,7 +50,7 @@ unit-test{
 
 memberA{
   bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 1000
     mailbox-push-timeout-time = 10ms
   }
@@ -82,7 +83,7 @@ memberA{
 }
 memberB{
   bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 1000
     mailbox-push-timeout-time = 10ms
   }
@@ -116,7 +117,7 @@ memberB{
 }
 memberC{
   bounded-mailbox {
-    mailbox-type = "org.opendaylight.controller.common.actor.MeteredBoundedMailbox"
+    mailbox-type = "org.opendaylight.controller.cluster.common.actor.MeteredBoundedMailbox"
     mailbox-capacity = 1000
     mailbox-push-timeout-time = 10ms
   }
index 4a46a3c26712c8e54da6650bd4e68b2de9ad91ca..7f8f0a1d0e32478d4b977cefab1398a252c52c2a 100644 (file)
@@ -60,31 +60,31 @@ public interface RestconfService {
 
     @GET
     @Path("/modules")
-    @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+    @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
             MediaType.APPLICATION_XML, MediaType.TEXT_XML })
     public StructuredData getModules(@Context UriInfo uriInfo);
 
     @GET
     @Path("/modules/{identifier:.+}")
-    @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+    @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
             MediaType.APPLICATION_XML, MediaType.TEXT_XML })
     public StructuredData getModules(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
 
     @GET
     @Path("/modules/module/{identifier:.+}")
-    @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+    @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
             MediaType.APPLICATION_XML, MediaType.TEXT_XML })
     public StructuredData getModule(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
 
     @GET
     @Path("/operations")
-    @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+    @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
             MediaType.APPLICATION_XML, MediaType.TEXT_XML })
     public StructuredData getOperations(@Context UriInfo uriInfo);
 
     @GET
     @Path("/operations/{identifier:.+}")
-    @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+    @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
             MediaType.APPLICATION_XML, MediaType.TEXT_XML })
     public StructuredData getOperations(@PathParam("identifier") String identifier, @Context UriInfo uriInfo);
 
@@ -149,7 +149,7 @@ public interface RestconfService {
 
     @GET
     @Path("/streams")
-    @Produces({ Draft02.MediaTypes.API + XML, Draft02.MediaTypes.API + JSON, MediaType.APPLICATION_JSON,
+    @Produces({ Draft02.MediaTypes.API + JSON, Draft02.MediaTypes.API + XML, MediaType.APPLICATION_JSON,
             MediaType.APPLICATION_XML, MediaType.TEXT_XML })
     public StructuredData getAvailableStreams(@Context UriInfo uriInfo);
 
index cfb5e5d7c2f2f57c2d71538d1db04acde1daf380..7a879f33779f1a41343d3cc48d39064c461f57b9 100644 (file)
@@ -101,9 +101,6 @@ public class NormalizedNodeJsonBodyWriter implements MessageBodyWriter<Normalize
             DataContainerChild<? extends PathArgument, ?> child = iterator.next();
             nnWriter.write(child);
             nnWriter.flush();
-            if(iterator.hasNext()) {
-                outputWriter.write(",");
-            }
         }
     }
 
index 63a5b1b54055a81cd3f8f3f736365c4e99e1d8b8..10201ab6f5148c78480b9cceea34b77766f0027e 100644 (file)
@@ -78,16 +78,19 @@ public class RestconfDocumentedExceptionMapper implements ExceptionMapper<Restco
 
         LOG.debug("In toResponse: {}", exception.getMessage());
 
-        // Default to the content type if there's no Accept header
 
-        MediaType mediaType = headers.getMediaType();
 
         List<MediaType> accepts = headers.getAcceptableMediaTypes();
+        accepts.remove(MediaType.WILDCARD_TYPE);
 
         LOG.debug("Accept headers: {}", accepts);
 
+        final MediaType mediaType;
         if (accepts != null && accepts.size() > 0) {
             mediaType = accepts.get(0); // just pick the first one
+        } else {
+            // Default to the content type if there's no Accept header
+            mediaType = MediaType.APPLICATION_JSON_TYPE;
         }
 
         LOG.debug("Using MediaType: {}", mediaType);
index 5d8c910afc31fa9d6420fc6d3a67466c34924317..a95a64b2c23d2011979726d24e95a3e382c06ce2 100644 (file)
@@ -17,6 +17,7 @@ import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import java.math.BigInteger;
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
@@ -137,13 +138,24 @@ public class RestconfImpl implements RestconfService {
 
     private static final String SCOPE_PARAM_NAME = "scope";
 
+    private static final String NETCONF_BASE = "urn:ietf:params:xml:ns:netconf:base:1.0";
+
+    private static final String NETCONF_BASE_PAYLOAD_NAME = "data";
+
+    private static final QName NETCONF_BASE_QNAME;
+
     static {
         try {
             EVENT_SUBSCRIPTION_AUGMENT_REVISION = new SimpleDateFormat("yyyy-MM-dd").parse("2014-07-08");
+            NETCONF_BASE_QNAME = QName.create(QNameModule.create(new URI(NETCONF_BASE), null), NETCONF_BASE_PAYLOAD_NAME );
         } catch (ParseException e) {
             throw new RestconfDocumentedException(
                     "It wasn't possible to convert revision date of sal-remote-augment to date", ErrorType.APPLICATION,
                     ErrorTag.OPERATION_FAILED);
+        } catch (URISyntaxException e) {
+            throw new RestconfDocumentedException(
+                    "It wasn't possible to create instance of URI class with "+NETCONF_BASE+" URI", ErrorType.APPLICATION,
+                    ErrorTag.OPERATION_FAILED);
         }
     }
 
@@ -705,11 +717,13 @@ public class RestconfImpl implements RestconfService {
         validateInput(iiWithData.getSchemaNode(), payload);
 
         DOMMountPoint mountPoint = iiWithData.getMountPoint();
+        validateTopLevelNodeName(payload, iiWithData.getInstanceIdentifier());
         final CompositeNode value = this.normalizeNode(payload, iiWithData.getSchemaNode(), mountPoint);
         validateListKeysEqualityInPayloadAndUri(iiWithData, value);
         final NormalizedNode<?, ?> datastoreNormalizedNode = compositeNodeToDatastoreNormalizedNode(value,
                 iiWithData.getSchemaNode());
 
+
         YangInstanceIdentifier normalizedII;
         if (mountPoint != null) {
             normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(
@@ -760,6 +774,29 @@ public class RestconfImpl implements RestconfService {
         return Response.status(Status.OK).build();
     }
 
+    private void validateTopLevelNodeName(final Node<?> node,
+            final YangInstanceIdentifier identifier) {
+        final String payloadName = getName(node);
+        final Iterator<PathArgument> pathArguments = identifier.getReversePathArguments().iterator();
+
+        //no arguments
+        if (!pathArguments.hasNext()) {
+            //no "data" payload
+            if (!node.getNodeType().equals(NETCONF_BASE_QNAME)) {
+                throw new RestconfDocumentedException("Instance identifier has to contain at least one path argument",
+                        ErrorType.PROTOCOL, ErrorTag.MALFORMED_MESSAGE);
+            }
+        //any arguments
+        } else {
+            final String identifierName = pathArguments.next().getNodeType().getLocalName();
+            if (!payloadName.equals(identifierName)) {
+                throw new RestconfDocumentedException("Payload name (" + payloadName
+                        + ") is different from identifier name (" + identifierName + ")", ErrorType.PROTOCOL,
+                        ErrorTag.MALFORMED_MESSAGE);
+            }
+        }
+    }
+
     /**
      * Validates whether keys in {@code payload} are equal to values of keys in {@code iiWithData} for list schema node
      *
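
The effect of validateTopLevelNodeName, restated as a sketch: with an empty instance identifier only a top-level "data" payload (the NETCONF base QName) is accepted, and otherwise the payload name must match the last path argument. The helper below is illustrative, not the method added above; exception arguments mirror the hunk.

    // Hedged sketch of the comparison at the heart of the new check.
    static void checkPayloadName(final String payloadName, final String identifierName) {
        if (!payloadName.equals(identifierName)) {
            throw new RestconfDocumentedException("Payload name (" + payloadName
                    + ") is different from identifier name (" + identifierName + ")",
                    ErrorType.PROTOCOL, ErrorTag.MALFORMED_MESSAGE);
        }
    }
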
diff --git a/opendaylight/md-sal/samples/clustering-test-app/configuration/pom.xml b/opendaylight/md-sal/samples/clustering-test-app/configuration/pom.xml
new file mode 100644 (file)
index 0000000..8d4bbbd
--- /dev/null
@@ -0,0 +1,55 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <artifactId>clustering-it</artifactId>
+        <groupId>org.opendaylight.controller.samples</groupId>
+        <version>1.1-SNAPSHOT</version>
+    </parent>
+    <artifactId>clustering-it-config</artifactId>
+  <packaging>jar</packaging>
+  <build>
+    <plugins>
+        <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>attach-artifacts</id>
+            <goals>
+              <goal>attach-artifact</goal>
+            </goals>
+            <phase>package</phase>
+            <configuration>
+              <artifacts>
+                <artifact>
+                  <file>${project.build.directory}/classes/initial/20-clustering-test-app.xml</file>
+                  <type>xml</type>
+                  <classifier>config</classifier>
+                </artifact>
+                  <artifact>
+                      <file>${project.build.directory}/classes/initial/module-shards.conf</file>
+                      <type>xml</type>
+                      <classifier>testmoduleshardconf</classifier>
+                  </artifact>
+                  <artifact>
+                      <file>${project.build.directory}/classes/initial/modules.conf</file>
+                      <type>xml</type>
+                      <classifier>testmoduleconf</classifier>
+                  </artifact>
+              </artifacts>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
diff --git a/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/20-clustering-test-app.xml b/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/20-clustering-test-app.xml
new file mode 100644 (file)
index 0000000..f019709
--- /dev/null
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- vi: set et smarttab sw=4 tabstop=4: -->
+<!--
+ Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+
+ This program and the accompanying materials are made available under the
+ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ and is available at http://www.eclipse.org/legal/epl-v10.html
+-->
+<snapshot>
+    <configuration>
+        <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+            <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
+                <module>
+                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider">
+                        prefix:clustering-it-provider
+                    </type>
+                    <name>clustering-it-provider</name>
+
+                    <rpc-registry>
+                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
+                        <name>binding-rpc-broker</name>
+                    </rpc-registry>
+                    <data-broker>
+                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-async-data-broker</type>
+                        <name>binding-data-broker</name>
+                    </data-broker>
+                    <notification-service>
+                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">
+                            binding:binding-notification-service
+                        </type>
+                        <name>binding-notification-broker</name>
+                    </notification-service>
+                </module>
+            </modules>
+        </data>
+
+    </configuration>
+
+    <required-capabilities>
+        <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&amp;revision=2013-10-28</capability>
+        <capability>urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider?module=clustering-it-provider&amp;revision=2014-08-19</capability>
+
+    </required-capabilities>
+
+</snapshot>
+
diff --git a/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/module-shards.conf b/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/module-shards.conf
new file mode 100644 (file)
index 0000000..59b0be1
--- /dev/null
@@ -0,0 +1,117 @@
+# This file describes which shards live on which members
+# The format for a module-shards is as follows,
+# {
+#    name = "<friendly_name_of_the_module>"
+#    shards = [
+#        {
+#            name="<any_name_that_is_unique_for_the_module>"
+#            replicas = [
+#                "<name_of_member_on_which_to_run>"
+#            ]
+#        }
+#    ]
+# }
+#
+# For Helium we support only one shard per module. Beyond Helium
+# we will support more than 1
+# The replicas section is a collection of member names. This information
+# will be used to decide on which members the replicas of a particular shard will be
+# located. Once replication is integrated with the distributed data store then
+# this section can have multiple entries.
+#
+#
+
+
+module-shards = [
+    {
+        name = "default"
+        shards = [
+            {
+                name="default"
+                replicas = [
+                    "member-1",
+                    "member-2",
+                    "member-3"
+                ]
+            }
+        ]
+    },
+    {
+        name = "topology"
+        shards = [
+            {
+                name="topology"
+                replicas = [
+                    "member-1",
+                    "member-2",
+                    "member-3"
+                ]
+            }
+        ]
+    },
+    {
+        name = "inventory"
+        shards = [
+            {
+                name="inventory"
+                replicas = [
+                    "member-1",
+                    "member-2",
+                    "member-3"
+                ]
+            }
+        ]
+    },
+         {
+             name = "toaster"
+             shards = [
+                 {
+                     name="toaster"
+                     replicas = [
+                         "member-1",
+                         "member-2",
+                         "member-3"
+                     ]
+                 }
+             ]
+         }
+         {
+             name = "car"
+             shards = [
+                 {
+                     name="car"
+                     replicas = [
+                         "member-1",
+                         "member-2",
+                         "member-3"
+                     ]
+                 }
+             ]
+         }
+         {
+             name = "people"
+             shards = [
+                 {
+                     name="people"
+                     replicas = [
+                         "member-1",
+                         "member-2",
+                         "member-3"
+                     ]
+                 }
+             ]
+         }
+         {
+             name = "car-people"
+             shards = [
+                 {
+                     name="car-people"
+                     replicas = [
+                         "member-1",
+                         "member-2",
+                         "member-3"
+                     ]
+                 }
+             ]
+         }
+
+]
diff --git a/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/modules.conf b/opendaylight/md-sal/samples/clustering-test-app/configuration/src/main/resources/initial/modules.conf
new file mode 100644 (file)
index 0000000..eda60d3
--- /dev/null
@@ -0,0 +1,47 @@
+# This file should describe all the modules that need to be placed in a separate shard
+# The format of the configuration is as follows
+# {
+#    name = "<friendly_name_of_module>"
+#    namespace = "<the yang namespace of the module>"
+#    shard-strategy = "module"
+# }
+#
+# Note that at this time the only supported shard-strategy is "module", which
+# places all the data of a single module in two shards (one for config and one
+# for operational data).
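+#
+# As a purely illustrative sketch (the module name and namespace below are
+# made up and are not part of the sample application), an entry for a module
+# named "example" would look like:
+#
+# modules = [
+#     {
+#         name = "example"
+#         namespace = "urn:example:params:xml:ns:yang:example"
+#         shard-strategy = "module"
+#     }
+# ]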
+
+modules = [
+    {
+        name = "inventory"
+        namespace = "urn:opendaylight:inventory"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "topology"
+        namespace = "urn:TBD:params:xml:ns:yang:network-topology"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "toaster"
+        namespace = "http://netconfcentral.org/ns/toaster"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "car"
+        namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "people"
+        namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people"
+        shard-strategy = "module"
+    },
+
+    {
+        name = "car-people"
+        namespace = "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people"
+        shard-strategy = "module"
+    }
+]
diff --git a/opendaylight/md-sal/samples/clustering-test-app/model/pom.xml b/opendaylight/md-sal/samples/clustering-test-app/model/pom.xml
new file mode 100644 (file)
index 0000000..a23e32d
--- /dev/null
@@ -0,0 +1,115 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <artifactId>clustering-it</artifactId>
+        <groupId>org.opendaylight.controller.samples</groupId>
+        <version>1.1-SNAPSHOT</version>
+    </parent>
+    <artifactId>clustering-it-model</artifactId>
+    <packaging>bundle</packaging>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.felix</groupId>
+                <artifactId>maven-bundle-plugin</artifactId>
+                <version>${bundle.plugin.version}</version>
+                <extensions>true</extensions>
+                <configuration>
+                    <instructions>
+                        <Bundle-Name>org.opendaylight.controller.sal-clustering-it-model</Bundle-Name>
+                        <Import-Package>*</Import-Package>
+                    </instructions>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.opendaylight.yangtools</groupId>
+                <artifactId>yang-maven-plugin</artifactId>
+                <version>${yangtools.version}</version>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>generate-sources</goal>
+                        </goals>
+                        <configuration>
+                            <yangFilesRootDir>src/main/yang</yangFilesRootDir>
+                            <codeGenerators>
+                                <generator>
+                                    <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+                                    <outputBaseDir>target/generated-sources/sal</outputBaseDir>
+                                </generator>
+                            </codeGenerators>
+                            <inspectDependencies>true</inspectDependencies>
+                        </configuration>
+                    </execution>
+                </executions>
+                <dependencies>
+                    <dependency>
+                        <groupId>org.opendaylight.yangtools</groupId>
+                        <artifactId>maven-sal-api-gen-plugin</artifactId>
+                        <version>${yangtools.version}</version>
+                        <type>jar</type>
+                    </dependency>
+                </dependencies>
+            </plugin>
+        </plugins>
+        <pluginManagement>
+            <plugins>
+                <!--This plugin's configuration is used to store Eclipse
+                    m2e settings only. It has no influence on the Maven build itself. -->
+                <plugin>
+                    <groupId>org.eclipse.m2e</groupId>
+                    <artifactId>lifecycle-mapping</artifactId>
+                    <version>1.0.0</version>
+                    <configuration>
+                        <lifecycleMappingMetadata>
+                            <pluginExecutions>
+                                <pluginExecution>
+                                    <pluginExecutionFilter>
+                                        <groupId>org.opendaylight.yangtools</groupId>
+                                        <artifactId>yang-maven-plugin</artifactId>
+                                        <versionRange>[0.5,)</versionRange>
+                                        <goals>
+                                            <goal>generate-sources</goal>
+                                        </goals>
+                                    </pluginExecutionFilter>
+                                    <action>
+                                        <ignore />
+                                    </action>
+                                </pluginExecution>
+                            </pluginExecutions>
+                        </lifecycleMappingMetadata>
+                    </configuration>
+                </plugin>
+            </plugins>
+        </pluginManagement>
+    </build>
+    <dependencies>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-binding</artifactId>
+            <version>${yangtools.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools</groupId>
+            <artifactId>yang-common</artifactId>
+            <version>${yangtools.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools.model</groupId>
+            <artifactId>ietf-inet-types</artifactId>
+            <version>${ietf-inet-types.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools.model</groupId>
+            <artifactId>ietf-yang-types</artifactId>
+            <version>${ietf-yang-types.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.yangtools.model</groupId>
+            <artifactId>yang-ext</artifactId>
+            <version>${yang-ext.version}</version>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-people.yang b/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-people.yang
new file mode 100644 (file)
index 0000000..8997246
--- /dev/null
@@ -0,0 +1,42 @@
+module car-people {
+
+    yang-version 1;
+
+    namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-people";
+
+    prefix car;
+
+    import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+    import car { prefix "c"; revision-date 2014-08-18; }
+    import people { prefix "people"; revision-date 2014-08-18; }
+
+    organization "Netconf Central";
+
+    contact
+      "Harman Singh <harmasin@cisco.com>";
+
+    description
+      "YANG model of the car-people mapping for the test application";
+
+    revision "2014-08-18" {
+      description
+        "Clustering sample app";
+    }
+
+    container car-people {
+      description
+        "Top-level container for the car-person mappings";
+
+      list car-person {
+        key "car-id person-id";
+        description "A mapping of cars and people.";
+        leaf car-id {
+          type c:car-id;
+        }
+
+        leaf person-id {
+          type people:person-id;
+        }
+      }
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-purchase.yang b/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car-purchase.yang
new file mode 100644 (file)
index 0000000..f6a8797
--- /dev/null
@@ -0,0 +1,60 @@
+module car-purchase {
+
+    yang-version 1;
+
+    namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car-purchase";
+
+    prefix cp;
+
+    import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+    import car { prefix "car"; revision-date 2014-08-18; }
+    import people { prefix "person"; revision-date 2014-08-18; }
+    import yang-ext { prefix "ext"; revision-date "2013-07-09"; }
+
+    organization "Netconf Central";
+
+    contact
+      "Harman Singh <harmasin@cisco.com>";
+
+    description
+      "YANG model for car purchase for test application";
+
+    revision "2014-08-18" {
+      description
+        "Clustering sample app";
+    }
+
+    rpc buy-car {
+          description
+            "buy a new car";
+          input {
+            leaf person {
+              ext:context-reference "person:person-context";
+              type person:person-ref;
+              description "A reference to a particular person.";
+            }
+
+            leaf car-id {
+              type car:car-id;
+              description "identifier of car.";
+            }
+            leaf person-id {
+              type person:person-id;
+              description "identifier of person.";
+            }
+          }
+        }
+
+        notification carBought {
+          description
+            "Indicates that a person bought a car.";
+          leaf car-id {
+            type car:car-id;
+            description "identifier of car.";
+          }
+          leaf person-id {
+            type person:person-id;
+            description "identifier of person.";
+          }
+        }
+}
\ No newline at end of file
diff --git a/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car.yang b/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/car.yang
new file mode 100644 (file)
index 0000000..d9cfb6b
--- /dev/null
@@ -0,0 +1,64 @@
+module car {
+
+    yang-version 1;
+
+    namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:car";
+
+    prefix car;
+
+    import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+
+    organization "Netconf Central";
+
+    contact
+      "Harman Singh <harmasin@cisco.com>";
+
+    description
+      "YANG model for car for test application";
+
+    revision "2014-08-18" {
+      description
+        "Clustering sample app";
+    }
+    
+    typedef car-id {
+      type inet:uri;
+      description "An identifier for car entry.";
+    }
+
+    grouping car-entry {
+      description "Describes the contents of a car entry:
+                   details of the car manufacturer, model, etc.";
+      leaf id {
+        type car-id;
+        description "identifier of a single car entry.";
+      }
+
+      leaf model {
+        type string;
+      }
+      leaf manufacturer {
+        type string;
+      }
+
+      leaf year {
+        type uint32;
+      }
+
+      leaf category {
+        type string;
+      }
+    }
+    
+    container cars {
+      description
+        "Top-level container for all car objects.";
+      list car-entry {
+        key "id";
+        description "A list of cars (as defined by the 'grouping car-entry').";
+        uses car-entry;
+      }
+    }
+
+    
+}
\ No newline at end of file
diff --git a/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/people.yang b/opendaylight/md-sal/samples/clustering-test-app/model/src/main/yang/people.yang
new file mode 100644 (file)
index 0000000..6c8f247
--- /dev/null
@@ -0,0 +1,80 @@
+module people {
+
+    yang-version 1;
+
+    namespace "urn:opendaylight:params:xml:ns:yang:controller:config:sal-clustering-it:people";
+
+    prefix people;
+
+    import ietf-inet-types { prefix "inet"; revision-date 2010-09-24; }
+
+    organization "Netconf Central";
+
+    contact
+      "Harman Singh <harmasin@cisco.com>";
+
+    description
+      "YANG model for person for test application";
+
+    revision "2014-08-18" {
+      description
+        "Clustering sample app";
+    }
+    
+    typedef person-id {
+      type inet:uri;
+      description "An identifier for person.";
+    }
+
+    typedef person-ref {
+      type instance-identifier;
+      description "A reference that points to a people:people/person in the data tree.";
+    }
+    identity person-context {
+        description "A person-context is a classifier for person elements which allows an RPC to provide a service on behalf of a particular element in the data tree.";
+    }
+
+    grouping person {
+      description "Describes the details of the person";
+
+      leaf id {
+        type person-id;
+        description "identifier of a single person entry.";
+      }
+
+      leaf gender {
+        type string;
+      }
+
+      leaf age {
+        type uint32;
+      }
+
+      leaf address {
+        type string;
+      }
+
+      leaf contactNo {
+        type string;
+      }
+    }
+    
+    container people {
+      description
+        "Top-level container for all people";
+
+      list person {
+        key "id";
+        description "A list of people (as defined by the 'grouping person').";
+        uses person;
+      }
+    }
+
+    rpc add-person {
+      description
+        "Add a person entry into database";
+      input {
+        uses person;
+      }
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/md-sal/samples/clustering-test-app/pom.xml b/opendaylight/md-sal/samples/clustering-test-app/pom.xml
new file mode 100644 (file)
index 0000000..863bbec
--- /dev/null
@@ -0,0 +1,16 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.opendaylight.controller.samples</groupId>
+        <artifactId>sal-samples</artifactId>
+        <version>1.1-SNAPSHOT</version>
+    </parent>
+    <artifactId>clustering-it</artifactId>
+    <packaging>pom</packaging>
+  <modules>
+    <module>configuration</module>
+    <module>model</module>
+    <module>provider</module>
+  </modules>
+</project>
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/pom.xml b/opendaylight/md-sal/samples/clustering-test-app/provider/pom.xml
new file mode 100644 (file)
index 0000000..093b681
--- /dev/null
@@ -0,0 +1,102 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <artifactId>clustering-it</artifactId>
+        <groupId>org.opendaylight.controller.samples</groupId>
+        <version>1.1-SNAPSHOT</version>
+    </parent>
+    <artifactId>clustering-it-provider</artifactId>
+    <packaging>bundle</packaging>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.felix</groupId>
+                <artifactId>maven-bundle-plugin</artifactId>
+                <version>${bundle.plugin.version}</version>
+                <extensions>true</extensions>
+                <configuration>
+                    <instructions>
+                        <Export-Package>org.opendaylight.controller.config.yang.config.clustering_it_provider</Export-Package>
+                        <Import-Package>*</Import-Package>
+                    </instructions>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.opendaylight.yangtools</groupId>
+                <artifactId>yang-maven-plugin</artifactId>
+                <version>${yangtools.version}</version>
+                <executions>
+                    <execution>
+                        <id>config</id>
+                        <goals>
+                            <goal>generate-sources</goal>
+                        </goals>
+                        <configuration>
+                            <codeGenerators>
+                                <generator>
+                                    <codeGeneratorClass>org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator</codeGeneratorClass>
+                                    <outputBaseDir>${jmxGeneratorPath}</outputBaseDir>
+                                    <additionalConfiguration>
+                                        <namespaceToPackage1>urn:opendaylight:params:xml:ns:yang:controller==org.opendaylight.controller.config.yang</namespaceToPackage1>
+                                    </additionalConfiguration>
+                                </generator>
+                                <generator>
+                                    <codeGeneratorClass>org.opendaylight.yangtools.maven.sal.api.gen.plugin.CodeGeneratorImpl</codeGeneratorClass>
+                                    <outputBaseDir>${salGeneratorPath}</outputBaseDir>
+                                </generator>
+                            </codeGenerators>
+                            <inspectDependencies>true</inspectDependencies>
+                        </configuration>
+                    </execution>
+                </executions>
+                <dependencies>
+                    <dependency>
+                        <groupId>org.opendaylight.controller</groupId>
+                        <artifactId>yang-jmx-generator-plugin</artifactId>
+                        <version>${config.version}</version>
+                    </dependency>
+                    <dependency>
+                        <groupId>org.opendaylight.yangtools</groupId>
+                        <artifactId>maven-sal-api-gen-plugin</artifactId>
+                        <version>${yangtools.version}</version>
+                    </dependency>
+                </dependencies>
+            </plugin>
+        </plugins>
+    </build>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.opendaylight.controller.samples</groupId>
+            <artifactId>clustering-it-model</artifactId>
+            <version>${version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>config-api</artifactId>
+            <version>${config.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-binding-config</artifactId>
+            <version>${mdsal.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-binding-api</artifactId>
+            <version>${mdsal.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>sal-common-util</artifactId>
+            <version>${mdsal.version}</version>
+        </dependency>
+        <dependency>
+          <groupId>equinoxSDK381</groupId>
+          <artifactId>org.eclipse.osgi</artifactId>
+          <version>3.8.1.v20120830-144521</version>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/listener/PeopleCarListener.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/listener/PeopleCarListener.java
new file mode 100644 (file)
index 0000000..4737d6e
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.listener;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.CarPeople;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPerson;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.people.rev140818.car.people.CarPersonKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBought;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseListener;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class PeopleCarListener implements CarPurchaseListener {
+
+  private static final Logger log = LoggerFactory.getLogger(PeopleCarListener.class);
+
+  private DataBroker dataProvider;
+
+
+
+  public void setDataProvider(final DataBroker salDataProvider) {
+    this.dataProvider = salDataProvider;
+  }
+
+  @Override
+  public void onCarBought(CarBought notification) {
+    log.info("onCarBought notification : Adding car person entry");
+
+    final CarPersonBuilder carPersonBuilder = new CarPersonBuilder();
+    carPersonBuilder.setCarId(notification.getCarId());
+    carPersonBuilder.setPersonId(notification.getPersonId());
+    CarPersonKey key = new CarPersonKey(notification.getCarId(), notification.getPersonId());
+    carPersonBuilder.setKey(key);
+    final CarPerson carPerson = carPersonBuilder.build();
+
+    InstanceIdentifier<CarPerson> carPersonIId =
+        InstanceIdentifier.<CarPeople>builder(CarPeople.class).child(CarPerson.class, carPerson.getKey()).build();
+
+
+    WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
+    tx.put(LogicalDatastoreType.CONFIGURATION, carPersonIId, carPerson);
+
+    Futures.addCallback(tx.submit(), new FutureCallback<Void>() {
+      @Override
+      public void onSuccess(final Void result) {
+        log.info("Car bought, entry added to map of people and car [{}]", carPerson);
+      }
+
+      @Override
+      public void onFailure(final Throwable t) {
+        log.info("Car bought, Failed entry addition to map of people and car [{}]", carPerson);
+      }
+    });
+
+  }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PeopleProvider.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PeopleProvider.java
new file mode 100644 (file)
index 0000000..e0d3f75
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.provider;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.SettableFuture;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.AddPersonInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.People;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PersonContext;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.Person;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.people.PersonBuilder;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Future;
+
+public class PeopleProvider implements PeopleService, AutoCloseable {
+
+  private static final Logger log = LoggerFactory.getLogger(PeopleProvider.class);
+
+  private DataBroker dataProvider;
+
+  private BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> rpcRegistration;
+
+  public void setDataProvider(final DataBroker salDataProvider) {
+    this.dataProvider = salDataProvider;
+  }
+
+
+  public void setRpcRegistration(BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> rpcRegistration) {
+    this.rpcRegistration = rpcRegistration;
+  }
+
+  @Override
+  public Future<RpcResult<Void>> addPerson(AddPersonInput input) {
+    log.info("RPC addPerson : adding person [{}]", input);
+
+    PersonBuilder builder = new PersonBuilder(input);
+    final Person person = builder.build();
+    final SettableFuture<RpcResult<Void>> futureResult = SettableFuture.create();
+
+    // Each entry is identified by a unique key, so build that identifier first
+    final InstanceIdentifier.InstanceIdentifierBuilder<Person> personIdBuilder =
+        InstanceIdentifier.<People>builder(People.class)
+            .child(Person.class, person.getKey());
+    final InstanceIdentifier<Person> personId = personIdBuilder.build();
+    // Place entry in data store tree
+    WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
+    tx.put(LogicalDatastoreType.CONFIGURATION, personId, person);
+
+    Futures.addCallback(tx.submit(), new FutureCallback<Void>() {
+      @Override
+      public void onSuccess(final Void result) {
+        log.info("RPC addPerson : person added successfully [{}]", person);
+        rpcRegistration.registerPath(PersonContext.class, personId);
+        log.info("RPC addPerson : routed rpc registered for instance ID [{}]", personId);
+        futureResult.set(RpcResultBuilder.<Void>success().build());
+      }
+
+      @Override
+      public void onFailure(final Throwable t) {
+        log.info("RPC addPerson : person addition failed [{}]", person);
+        futureResult.set(RpcResultBuilder.<Void>failed()
+            .withError(RpcError.ErrorType.APPLICATION, t.getMessage()).build());
+      }
+    });
+    return futureResult;
+  }
+
+  @Override
+  public void close() throws Exception {
+
+  }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PurchaseCarProvider.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/clustering/it/provider/PurchaseCarProvider.java
new file mode 100644 (file)
index 0000000..74a0aa6
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.clustering.it.provider;
+
+import com.google.common.util.concurrent.SettableFuture;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.BuyCarInput;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarBoughtBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.Future;
+
+
+public class PurchaseCarProvider implements CarPurchaseService, AutoCloseable {
+
+  private static final Logger log = LoggerFactory.getLogger(PurchaseCarProvider.class);
+
+  private NotificationProviderService notificationProvider;
+
+
+  public void setNotificationProvider(final NotificationProviderService salService) {
+    this.notificationProvider = salService;
+  }
+
+
+  @Override
+  public Future<RpcResult<Void>> buyCar(BuyCarInput input) {
+    log.info("Routed RPC buyCar : generating notification for buying car [{}]", input);
+    SettableFuture<RpcResult<Void>> futureResult = SettableFuture.create();
+    CarBoughtBuilder carBoughtBuilder = new CarBoughtBuilder();
+    carBoughtBuilder.setCarId(input.getCarId());
+    carBoughtBuilder.setPersonId(input.getPersonId());
+    notificationProvider.publish(carBoughtBuilder.build());
+    futureResult.set(RpcResultBuilder.<Void>success().build());
+    return futureResult;
+  }
+
+  @Override
+  public void close() throws Exception {
+
+  }
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/config/yang/config/clustering_it_provider/ClusteringItProviderModule.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/config/yang/config/clustering_it_provider/ClusteringItProviderModule.java
new file mode 100644 (file)
index 0000000..d91d40a
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.config.yang.config.clustering_it_provider;
+
+
+import org.opendaylight.controller.clustering.it.listener.PeopleCarListener;
+import org.opendaylight.controller.clustering.it.provider.PeopleProvider;
+import org.opendaylight.controller.clustering.it.provider.PurchaseCarProvider;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
+import org.opendaylight.controller.sal.binding.api.NotificationProviderService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.car.purchase.rev140818.CarPurchaseService;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.config.sal.clustering.it.people.rev140818.PeopleService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.NotificationListener;
+
+public class ClusteringItProviderModule extends org.opendaylight.controller.config.yang.config.clustering_it_provider.AbstractClusteringItProviderModule {
+    public ClusteringItProviderModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
+        super(identifier, dependencyResolver);
+    }
+
+    public ClusteringItProviderModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver, org.opendaylight.controller.config.yang.config.clustering_it_provider.ClusteringItProviderModule oldModule, java.lang.AutoCloseable oldInstance) {
+        super(identifier, dependencyResolver, oldModule, oldInstance);
+    }
+
+    @Override
+    public void customValidation() {
+        // add custom validation for module attributes here.
+    }
+
+    @Override
+    public java.lang.AutoCloseable createInstance() {
+      DataBroker dataBrokerService = getDataBrokerDependency();
+      NotificationProviderService notificationProvider = getNotificationServiceDependency();
+
+      // Add routed RPC registration for car purchase
+      final PurchaseCarProvider purchaseCar = new PurchaseCarProvider();
+      purchaseCar.setNotificationProvider(notificationProvider);
+
+      final BindingAwareBroker.RoutedRpcRegistration<CarPurchaseService> purchaseCarRpc = getRpcRegistryDependency()
+          .addRoutedRpcImplementation(CarPurchaseService.class, purchaseCar);
+
+      // Add people provider registration
+      final PeopleProvider people = new PeopleProvider();
+      people.setDataProvider(dataBrokerService);
+
+      people.setRpcRegistration(purchaseCarRpc);
+
+      final BindingAwareBroker.RpcRegistration<PeopleService> peopleRpcReg = getRpcRegistryDependency()
+          .addRpcImplementation(PeopleService.class, people);
+
+
+
+      final PeopleCarListener peopleCarListener = new PeopleCarListener();
+      peopleCarListener.setDataProvider(dataBrokerService);
+
+      final ListenerRegistration<NotificationListener> listenerReg =
+          getNotificationServiceDependency().registerNotificationListener( peopleCarListener );
+
+      // Wrap the providers in an AutoCloseable that closes all md-sal
+      // registrations on close()
+      final class AutoCloseableClusteringItProvider implements AutoCloseable {
+
+        @Override
+        public void close() throws Exception {
+          peopleRpcReg.close();
+          purchaseCarRpc.close();
+          people.close();
+          purchaseCar.close();
+          listenerReg.close();
+        }
+      }
+
+      AutoCloseable ret = new AutoCloseableClusteringItProvider();
+      return ret;
+    }
+
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/config/yang/config/clustering_it_provider/ClusteringItProviderModuleFactory.java b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/java/org/opendaylight/controller/config/yang/config/clustering_it_provider/ClusteringItProviderModuleFactory.java
new file mode 100644 (file)
index 0000000..642263c
--- /dev/null
@@ -0,0 +1,13 @@
+/*
+* Generated file
+*
+* Generated from: yang module name: clustering-it-provider yang module local name: clustering-it-provider
+* Generated by: org.opendaylight.controller.config.yangjmxgenerator.plugin.JMXGenerator
+* Generated at: Tue Aug 19 14:44:46 PDT 2014
+*
+* Do not modify this file unless it is present under src/main directory
+*/
+package org.opendaylight.controller.config.yang.config.clustering_it_provider;
+public class ClusteringItProviderModuleFactory extends org.opendaylight.controller.config.yang.config.clustering_it_provider.AbstractClusteringItProviderModuleFactory {
+
+}
diff --git a/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/yang/clustering-it-provider.yang b/opendaylight/md-sal/samples/clustering-test-app/provider/src/main/yang/clustering-it-provider.yang
new file mode 100644 (file)
index 0000000..ff3f9a8
--- /dev/null
@@ -0,0 +1,60 @@
+module clustering-it-provider {
+
+    yang-version 1;
+    namespace "urn:opendaylight:params:xml:ns:yang:controller:config:clustering-it-provider";
+    prefix "clustering-it-provider";
+
+    import config { prefix config; revision-date 2013-04-05; }
+    import opendaylight-md-sal-binding { prefix mdsal; revision-date 2013-10-28; }
+
+    description
+        "This module contains the base YANG definitions for
+        clustering-it-provider implementation.";
+
+    revision "2014-08-19" {
+        description
+            "Initial revision.";
+    }
+
+    // This is the definition of the service implementation as a module identity.
+    identity clustering-it-provider {
+            base config:module-type;
+
+            // Specifies the prefix for generated java classes.
+            config:java-name-prefix ClusteringItProvider;
+    }
+
+    // Augments the 'configuration' choice node under modules/module.
+    augment "/config:modules/config:module/config:configuration" {
+        case clustering-it-provider {
+            when "/config:modules/config:module/config:type = 'clustering-it-provider'";
+
+            container rpc-registry {
+                uses config:service-ref {
+                    refine type {
+                        mandatory true;
+                        config:required-identity mdsal:binding-rpc-registry;
+                    }
+                }
+            }
+
+            container notification-service {
+                uses config:service-ref {
+                    refine type {
+                        mandatory true;
+                        config:required-identity mdsal:binding-notification-service;
+                    }
+                }
+            }
+
+            container data-broker {
+                uses config:service-ref {
+                    refine type {
+                        mandatory false;
+                        config:required-identity mdsal:binding-async-data-broker;
+                    }
+                }
+            }
+        }
+    }
+}
index ae7d3234808bdd00059fce9403419eb923f8a8fd..d13200e4e8c9a12bce5a3a989472c13147b4a75f 100644 (file)
@@ -17,6 +17,7 @@
     <module>toaster-provider</module>
     <module>toaster-config</module>
     <module>l2switch</module>
+    <module>clustering-test-app</module>
   </modules>
   <scm>
     <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
index fe1813a19943dfe7f3446b55f9c49b1986df6729..57313d2948960b038791d75a5fdb9287cce41530 100644 (file)
       <artifactId>org.osgi.core</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
 
   <build>
index c1996f4691632637abc9fc7dffacce0bcb12f2ad..361373d78da93f114f51c887ab95c78bc6ab3265 100644 (file)
@@ -15,9 +15,9 @@ import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMap
 import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNode;
 import static org.opendaylight.md.controller.topology.manager.FlowCapableNodeMapping.toTopologyNodeId;
 
+import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
-
 import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
@@ -50,17 +50,19 @@ import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 
 class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, OpendaylightInventoryListener {
 
-    private final Logger LOG = LoggerFactory.getLogger(FlowCapableTopologyExporter.class);
+    private static final Logger LOG = LoggerFactory.getLogger(FlowCapableTopologyExporter.class);
     private final InstanceIdentifier<Topology> topology;
     private final OperationProcessor processor;
 
-    FlowCapableTopologyExporter(final OperationProcessor processor, final InstanceIdentifier<Topology> topology) {
+    FlowCapableTopologyExporter(final OperationProcessor processor,
+            final InstanceIdentifier<Topology> topology) {
         this.processor = Preconditions.checkNotNull(processor);
         this.topology = Preconditions.checkNotNull(topology);
     }
@@ -73,15 +75,14 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
 
         processor.enqueueOperation(new TopologyOperation() {
             @Override
-            public void applyOperation(final ReadWriteTransaction transaction) {
-                removeAffectedLinks(nodeId);
+            public void applyOperation(ReadWriteTransaction transaction) {
+                removeAffectedLinks(nodeId, transaction);
+                transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance);
             }
-        });
 
-        processor.enqueueOperation(new TopologyOperation() {
             @Override
-            public void applyOperation(ReadWriteTransaction transaction) {
-                transaction.delete(LogicalDatastoreType.OPERATIONAL, nodeInstance);
+            public String toString() {
+                return "onNodeRemoved";
             }
         });
     }
@@ -97,6 +98,11 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
                     final InstanceIdentifier<Node> path = getNodePath(toTopologyNodeId(notification.getId()));
                     transaction.merge(LogicalDatastoreType.OPERATIONAL, path, node, true);
                 }
+
+                @Override
+                public String toString() {
+                    return "onNodeUpdated";
+                }
             });
         }
     }
@@ -104,28 +110,30 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
     @Override
     public void onNodeConnectorRemoved(final NodeConnectorRemoved notification) {
 
-        final InstanceIdentifier<TerminationPoint> tpInstance = toTerminationPointIdentifier(notification
-                .getNodeConnectorRef());
+        final InstanceIdentifier<TerminationPoint> tpInstance = toTerminationPointIdentifier(
+                notification.getNodeConnectorRef());
 
-        processor.enqueueOperation(new TopologyOperation() {
-            @Override
-            public void applyOperation(final ReadWriteTransaction transaction) {
-                final TpId tpId = toTerminationPointId(getNodeConnectorKey(notification.getNodeConnectorRef()).getId());
-                removeAffectedLinks(tpId);
-            }
-        });
+        final TpId tpId = toTerminationPointId(getNodeConnectorKey(
+                notification.getNodeConnectorRef()).getId());
 
         processor.enqueueOperation(new TopologyOperation() {
             @Override
             public void applyOperation(ReadWriteTransaction transaction) {
+                removeAffectedLinks(tpId, transaction);
                 transaction.delete(LogicalDatastoreType.OPERATIONAL, tpInstance);
             }
+
+            @Override
+            public String toString() {
+                return "onNodeConnectorRemoved";
+            }
         });
     }
 
     @Override
     public void onNodeConnectorUpdated(final NodeConnectorUpdated notification) {
-        final FlowCapableNodeConnectorUpdated fcncu = notification.getAugmentation(FlowCapableNodeConnectorUpdated.class);
+        final FlowCapableNodeConnectorUpdated fcncu = notification.getAugmentation(
+                FlowCapableNodeConnectorUpdated.class);
         if (fcncu != null) {
             processor.enqueueOperation(new TopologyOperation() {
                 @Override
@@ -137,9 +145,14 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
                     transaction.merge(LogicalDatastoreType.OPERATIONAL, path, point, true);
                     if ((fcncu.getState() != null && fcncu.getState().isLinkDown())
                             || (fcncu.getConfiguration() != null && fcncu.getConfiguration().isPORTDOWN())) {
-                        removeAffectedLinks(point.getTpId());
+                        removeAffectedLinks(point.getTpId(), transaction);
                     }
                 }
+
+                @Override
+                public String toString() {
+                    return "onNodeConnectorUpdated";
+                }
             });
         }
     }
@@ -153,6 +166,11 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
                 final InstanceIdentifier<Link> path = linkPath(link);
                 transaction.merge(LogicalDatastoreType.OPERATIONAL, path, link, true);
             }
+
+            @Override
+            public String toString() {
+                return "onLinkDiscovered";
+            }
         });
     }
 
@@ -168,6 +186,11 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
             public void applyOperation(final ReadWriteTransaction transaction) {
                 transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(toTopologyLink(notification)));
             }
+
+            @Override
+            public String toString() {
+                return "onLinkRemoved";
+            }
         });
     }
 
@@ -188,62 +211,92 @@ class FlowCapableTopologyExporter implements FlowTopologyDiscoveryListener, Open
         return tpPath(toTopologyNodeId(invNodeKey.getId()), toTerminationPointId(invNodeConnectorKey.getId()));
     }
 
-    private void removeAffectedLinks(final NodeId id) {
-        processor.enqueueOperation(new TopologyOperation() {
+    private void removeAffectedLinks(final NodeId id, final ReadWriteTransaction transaction) {
+        CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture =
+                transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
+        Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
             @Override
-            public void applyOperation(final ReadWriteTransaction transaction) {
-                CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture = transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
-                Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
-                    @Override
-                    public void onSuccess(Optional<Topology> topologyOptional) {
-                        if (topologyOptional.isPresent()) {
-                            List<Link> linkList = topologyOptional.get().getLink() != null
-                                    ? topologyOptional.get().getLink() : Collections.<Link> emptyList();
-                            for (Link link : linkList) {
-                                if (id.equals(link.getSource().getSourceNode()) || id.equals(link.getDestination().getDestNode())) {
-                                    transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
-                                }
-                            }
-                        }
-                    }
+            public void onSuccess(Optional<Topology> topologyOptional) {
+                removeAffectedLinks(id, topologyOptional);
+            }
 
-                    @Override
-                    public void onFailure(Throwable throwable) {
-                        LOG.error("Error reading topology data for topology {}", topology, throwable);
-                    }
-                });
+            @Override
+            public void onFailure(Throwable throwable) {
+                LOG.error("Error reading topology data for topology {}", topology, throwable);
             }
         });
     }
 
-    private void removeAffectedLinks(final TpId id) {
-        processor.enqueueOperation(new TopologyOperation() {
-            @Override
-            public void applyOperation(final ReadWriteTransaction transaction) {
-                CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture = transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
-                Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
-                    @Override
-                    public void onSuccess(Optional<Topology> topologyOptional) {
-                        if (topologyOptional.isPresent()) {
-                            List<Link> linkList = topologyOptional.get().getLink() != null
-                                    ? topologyOptional.get().getLink() : Collections.<Link> emptyList();
-                            for (Link link : linkList) {
-                                if (id.equals(link.getSource().getSourceTp()) || id.equals(link.getDestination().getDestTp())) {
-                                    transaction.delete(LogicalDatastoreType.OPERATIONAL, linkPath(link));
-                                }
-                            }
-                        }
-                    }
+    private void removeAffectedLinks(final NodeId id, Optional<Topology> topologyOptional) {
+        if (!topologyOptional.isPresent()) {
+            return;
+        }
+
+        List<Link> linkList = topologyOptional.get().getLink() != null ?
+                topologyOptional.get().getLink() : Collections.<Link> emptyList();
+        final List<InstanceIdentifier<Link>> linkIDsToDelete = Lists.newArrayList();
+        for (Link link : linkList) {
+            if (id.equals(link.getSource().getSourceNode()) ||
+                    id.equals(link.getDestination().getDestNode())) {
+                linkIDsToDelete.add(linkPath(link));
+            }
+        }
+
+        enqueueLinkDeletes(linkIDsToDelete);
+    }
 
-                    @Override
-                    public void onFailure(Throwable throwable) {
-                        LOG.error("Error reading topology data for topology {}", topology, throwable);
+    private void enqueueLinkDeletes(final Collection<InstanceIdentifier<Link>> linkIDsToDelete) {
+        if (!linkIDsToDelete.isEmpty()) {
+            processor.enqueueOperation(new TopologyOperation() {
+                @Override
+                public void applyOperation(ReadWriteTransaction transaction) {
+                    for (InstanceIdentifier<Link> linkID : linkIDsToDelete) {
+                        transaction.delete(LogicalDatastoreType.OPERATIONAL, linkID);
                     }
-                });
+                }
+
+                @Override
+                public String toString() {
+                    return "Delete Links " + linkIDsToDelete.size();
+                }
+            });
+        }
+    }
+
+    private void removeAffectedLinks(final TpId id, final ReadWriteTransaction transaction) {
+        CheckedFuture<Optional<Topology>, ReadFailedException> topologyDataFuture =
+                transaction.read(LogicalDatastoreType.OPERATIONAL, topology);
+        Futures.addCallback(topologyDataFuture, new FutureCallback<Optional<Topology>>() {
+            @Override
+            public void onSuccess(Optional<Topology> topologyOptional) {
+                removeAffectedLinks(id, topologyOptional);
+            }
+
+            @Override
+            public void onFailure(Throwable throwable) {
+                LOG.error("Error reading topology data for topology {}", topology, throwable);
             }
         });
     }
 
+    private void removeAffectedLinks(final TpId id, Optional<Topology> topologyOptional) {
+        if (!topologyOptional.isPresent()) {
+            return;
+        }
+
+        List<Link> linkList = topologyOptional.get().getLink() != null
+                ? topologyOptional.get().getLink() : Collections.<Link> emptyList();
+        final List<InstanceIdentifier<Link>> linkIDsToDelete = Lists.newArrayList();
+        for (Link link : linkList) {
+            if (id.equals(link.getSource().getSourceTp()) ||
+                    id.equals(link.getDestination().getDestTp())) {
+                linkIDsToDelete.add(linkPath(link));
+            }
+        }
+
+        enqueueLinkDeletes(linkIDsToDelete);
+    }
+
     private InstanceIdentifier<Node> getNodePath(final NodeId nodeId) {
         return topology.child(Node.class, new NodeKey(nodeId));
     }
index 1cf648eb975c521d6c81e22b927e8f276f888e99..f09da0045930cf7cc843de1a924e64841f2db508 100644 (file)
@@ -11,14 +11,17 @@ import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
+
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
+
 import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
 import org.opendaylight.controller.md.sal.binding.api.DataBroker;
 import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncTransaction;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionChain;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -50,9 +53,9 @@ final class OperationProcessor implements AutoCloseable, Runnable, TransactionCh
             for (; ; ) {
                 TopologyOperation op = queue.take();
 
-                LOG.debug("New operations available, starting transaction");
-                final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction();
+                LOG.debug("New {} operation available, starting transaction", op);
 
+                final ReadWriteTransaction tx = transactionChain.newReadWriteTransaction();
 
                 int ops = 0;
                 do {
@@ -64,14 +67,16 @@ final class OperationProcessor implements AutoCloseable, Runnable, TransactionCh
                     } else {
                         op = null;
                     }
+
+                    LOG.debug("Next operation {}", op);
                 } while (op != null);
 
                 LOG.debug("Processed {} operations, submitting transaction", ops);
 
-                final CheckedFuture txResultFuture = tx.submit();
-                Futures.addCallback(txResultFuture, new FutureCallback() {
+                CheckedFuture<Void, TransactionCommitFailedException> txResultFuture = tx.submit();
+                Futures.addCallback(txResultFuture, new FutureCallback<Void>() {
                     @Override
-                    public void onSuccess(Object o) {
+                    public void onSuccess(Void notUsed) {
                         LOG.debug("Topology export successful for tx :{}", tx.getIdentifier());
                     }
 
diff --git a/opendaylight/md-sal/topology-manager/src/test/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporterTest.java b/opendaylight/md-sal/topology-manager/src/test/java/org/opendaylight/md/controller/topology/manager/FlowCapableTopologyExporterTest.java
new file mode 100644 (file)
index 0000000..b7a56a4
--- /dev/null
@@ -0,0 +1,666 @@
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.md.controller.topology.manager;
+
+import static org.junit.Assert.fail;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.atLeast;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.InOrder;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeConnectorUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdated;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.inventory.rev130819.FlowCapableNodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkDiscoveredBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.topology.discovery.rev130819.LinkRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.PortConfig;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.flow.types.port.rev130925.flow.capable.port.StateBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRef;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeRemovedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeUpdatedBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.Nodes;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnector;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNode;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.model.topology.inventory.rev131030.InventoryNodeConnector;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.LinkId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NetworkTopology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.NodeId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TopologyId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.TpId;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Destination;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.DestinationBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.Source;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.link.attributes.SourceBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.Topology;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.TopologyKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Link;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkBuilder;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.LinkKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.Node;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.NodeKey;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPoint;
+import org.opendaylight.yang.gen.v1.urn.tbd.params.xml.ns.yang.network.topology.rev131021.network.topology.topology.node.TerminationPointKey;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.KeyedInstanceIdentifier;
+
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.common.util.concurrent.Uninterruptibles;
+
+public class FlowCapableTopologyExporterTest {
+
+    @Mock
+    private DataBroker mockDataBroker;
+
+    @Mock
+    private BindingTransactionChain mockTxChain;
+
+    private OperationProcessor processor;
+
+    private FlowCapableTopologyExporter exporter;
+
+    private InstanceIdentifier<Topology> topologyIID;
+
+    private final ExecutorService executor = Executors.newFixedThreadPool(1);
+
+    @Before
+    public void setUp() {
+        MockitoAnnotations.initMocks(this);
+
+        doReturn(mockTxChain).when(mockDataBroker)
+                .createTransactionChain(any(TransactionChainListener.class));
+
+        processor = new OperationProcessor(mockDataBroker);
+
+        topologyIID = InstanceIdentifier.create(NetworkTopology.class)
+                .child(Topology.class, new TopologyKey(new TopologyId("test")));
+        exporter = new FlowCapableTopologyExporter(processor, topologyIID);
+
+        executor.execute(processor);
+    }
+
+    @After
+    public void tearDown() {
+        executor.shutdownNow();
+    }
+
+    @SuppressWarnings({ "rawtypes" })
+    @Test
+    public void testOnNodeRemoved() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                nodeKey = newInvNodeKey("node1");
+        InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+                org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+                nodeKey);
+
+        List<Link> linkList = Arrays.asList(
+                newLink("link1", newSourceNode("node1"), newDestNode("dest")),
+                newLink("link2", newSourceNode("source"), newDestNode("node1")),
+                newLink("link2", newSourceNode("source2"), newDestNode("dest2")));
+        final Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+        InstanceIdentifier[] expDeletedIIDs = {
+                topologyIID.child(Link.class, linkList.get(0).getKey()),
+                topologyIID.child(Link.class, linkList.get(1).getKey()),
+                topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+            };
+
+        SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+        ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
+        doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
+                .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+
+        CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
+
+        int expDeleteCalls = expDeletedIIDs.length;
+        CountDownLatch deleteLatch = new CountDownLatch(expDeleteCalls);
+        ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+                ArgumentCaptor.forClass(InstanceIdentifier.class);
+        setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
+
+        ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class);
+        setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch);
+        CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2);
+
+        doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build());
+
+        waitForSubmit(submitLatch1);
+
+        setReadFutureAsync(topology, readFuture);
+
+        waitForDeletes(expDeleteCalls, deleteLatch);
+
+        waitForSubmit(submitLatch2);
+
+        assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+
+        verifyMockTx(mockTx1);
+        verifyMockTx(mockTx2);
+    }
+
+    @SuppressWarnings({ "rawtypes" })
+    @Test
+    public void testOnNodeRemovedWithNoTopology() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                nodeKey = newInvNodeKey("node1");
+        InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+                org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+                nodeKey);
+
+        InstanceIdentifier[] expDeletedIIDs = {
+                topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+            };
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        doReturn(Futures.immediateCheckedFuture(Optional.absent())).when(mockTx)
+                .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+        CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+
+        CountDownLatch deleteLatch = new CountDownLatch(1);
+        ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+                ArgumentCaptor.forClass(InstanceIdentifier.class);
+        setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeRemoved(new NodeRemovedBuilder().setNodeRef(new NodeRef(invNodeID)).build());
+
+        waitForSubmit(submitLatch);
+
+        waitForDeletes(1, deleteLatch);
+
+        assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Test
+    public void testOnNodeConnectorRemoved() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                  nodeKey = newInvNodeKey("node1");
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+                newInvNodeConnKey("tp1");
+
+        InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+        List<Link> linkList = Arrays.asList(
+                newLink("link1", newSourceTp("tp1"), newDestTp("dest")),
+                newLink("link2", newSourceTp("source"), newDestTp("tp1")),
+                newLink("link3", newSourceTp("source2"), newDestTp("dest2")));
+        final Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+        InstanceIdentifier[] expDeletedIIDs = {
+                topologyIID.child(Link.class, linkList.get(0).getKey()),
+                topologyIID.child(Link.class, linkList.get(1).getKey()),
+                topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+                        .child(TerminationPoint.class, new TerminationPointKey(new TpId("tp1")))
+            };
+
+        final SettableFuture<Optional<Topology>> readFuture = SettableFuture.create();
+        ReadWriteTransaction mockTx1 = mock(ReadWriteTransaction.class);
+        doReturn(Futures.makeChecked(readFuture, ReadFailedException.MAPPER)).when(mockTx1)
+                .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+
+        CountDownLatch submitLatch1 = setupStubbedSubmit(mockTx1);
+
+        int expDeleteCalls = expDeletedIIDs.length;
+        CountDownLatch deleteLatch = new CountDownLatch(expDeleteCalls);
+        ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+                ArgumentCaptor.forClass(InstanceIdentifier.class);
+        setupStubbedDeletes(mockTx1, deletedLinkIDs, deleteLatch);
+
+        ReadWriteTransaction mockTx2 = mock(ReadWriteTransaction.class);
+        setupStubbedDeletes(mockTx2, deletedLinkIDs, deleteLatch);
+        CountDownLatch submitLatch2 = setupStubbedSubmit(mockTx2);
+
+        doReturn(mockTx1).doReturn(mockTx2).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef(
+                new NodeConnectorRef(invNodeConnID)).build());
+
+        waitForSubmit(submitLatch1);
+
+        setReadFutureAsync(topology, readFuture);
+
+        waitForDeletes(expDeleteCalls, deleteLatch);
+
+        waitForSubmit(submitLatch2);
+
+        assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+
+        verifyMockTx(mockTx1);
+        verifyMockTx(mockTx2);
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Test
+    public void testOnNodeConnectorRemovedWithNoTopology() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                  nodeKey = newInvNodeKey("node1");
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+                newInvNodeConnKey("tp1");
+
+        InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+        InstanceIdentifier[] expDeletedIIDs = {
+                topologyIID.child(Node.class, new NodeKey(new NodeId("node1")))
+                        .child(TerminationPoint.class, new TerminationPointKey(new TpId("tp1")))
+            };
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        doReturn(Futures.immediateCheckedFuture(Optional.absent())).when(mockTx)
+                .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+        CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+
+        CountDownLatch deleteLatch = new CountDownLatch(1);
+        ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+                ArgumentCaptor.forClass(InstanceIdentifier.class);
+        setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeConnectorRemoved(new NodeConnectorRemovedBuilder().setNodeConnectorRef(
+                new NodeConnectorRef(invNodeConnID)).build());
+
+        waitForSubmit(submitLatch);
+
+        waitForDeletes(1, deleteLatch);
+
+        assertDeletedIDs(expDeletedIIDs, deletedLinkIDs);
+    }
+
+    @Test
+    public void testOnNodeUpdated() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                            nodeKey = newInvNodeKey("node1");
+        InstanceIdentifier<?> invNodeID = InstanceIdentifier.create(Nodes.class).child(
+                org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+                nodeKey);
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeUpdated(new NodeUpdatedBuilder().setNodeRef(new NodeRef(invNodeID))
+                .setId(nodeKey.getId()).addAugmentation(FlowCapableNodeUpdated.class,
+                        new FlowCapableNodeUpdatedBuilder().build()).build());
+
+        waitForSubmit(submitLatch);
+
+        ArgumentCaptor<Node> mergedNode = ArgumentCaptor.forClass(Node.class);
+        NodeId expNodeId = new NodeId("node1");
+        verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(Node.class,
+                new NodeKey(expNodeId))), mergedNode.capture(), eq(true));
+        assertEquals("getNodeId", expNodeId, mergedNode.getValue().getNodeId());
+        InventoryNode augmentation = mergedNode.getValue().getAugmentation(InventoryNode.class);
+        assertNotNull("Missing augmentation", augmentation);
+        assertEquals("getInventoryNodeRef", new NodeRef(invNodeID), augmentation.getInventoryNodeRef());
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Test
+    public void testOnNodeConnectorUpdated() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                 nodeKey = newInvNodeKey("node1");
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+                newInvNodeConnKey("tp1");
+
+        InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+                new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+                        FlowCapableNodeConnectorUpdated.class,
+                        new FlowCapableNodeConnectorUpdatedBuilder().build()).build());
+
+        waitForSubmit(submitLatch);
+
+        ArgumentCaptor<TerminationPoint> mergedNode = ArgumentCaptor.forClass(TerminationPoint.class);
+        NodeId expNodeId = new NodeId("node1");
+        TpId expTpId = new TpId("tp1");
+        InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+                Node.class, new NodeKey(expNodeId)).child(TerminationPoint.class,
+                        new TerminationPointKey(expTpId));
+        verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+                mergedNode.capture(), eq(true));
+        assertEquals("getTpId", expTpId, mergedNode.getValue().getTpId());
+        InventoryNodeConnector augmentation = mergedNode.getValue().getAugmentation(
+                InventoryNodeConnector.class);
+        assertNotNull("Missing augmentation", augmentation);
+        assertEquals("getInventoryNodeConnectorRef", new NodeConnectorRef(invNodeConnID),
+                augmentation.getInventoryNodeConnectorRef());
+    }
+
+    @SuppressWarnings("rawtypes")
+    @Test
+    public void testOnNodeConnectorUpdatedWithLinkStateDown() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                 nodeKey = newInvNodeKey("node1");
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+                newInvNodeConnKey("tp1");
+
+        InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+        List<Link> linkList = Arrays.asList(newLink("link1", newSourceTp("tp1"), newDestTp("dest")));
+        Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        doReturn(Futures.immediateCheckedFuture(Optional.of(topology))).when(mockTx)
+                .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+        setupStubbedSubmit(mockTx);
+
+        CountDownLatch deleteLatch = new CountDownLatch(1);
+        ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+                ArgumentCaptor.forClass(InstanceIdentifier.class);
+        setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+                new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+                        FlowCapableNodeConnectorUpdated.class,
+                        new FlowCapableNodeConnectorUpdatedBuilder().setState(
+                                new StateBuilder().setLinkDown(true).build()).build()).build());
+
+        waitForDeletes(1, deleteLatch);
+
+        InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+                Node.class, new NodeKey(new NodeId("node1"))).child(TerminationPoint.class,
+                        new TerminationPointKey(new TpId("tp1")));
+
+        verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+                any(TerminationPoint.class), eq(true));
+
+        assertDeletedIDs(new InstanceIdentifier[]{topologyIID.child(Link.class,
+                linkList.get(0).getKey())}, deletedLinkIDs);
+    }
+
+
+    @SuppressWarnings("rawtypes")
+    @Test
+    public void testOnNodeConnectorUpdatedWithPortDown() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                 nodeKey = newInvNodeKey("node1");
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey =
+                newInvNodeConnKey("tp1");
+
+        InstanceIdentifier<?> invNodeConnID = newNodeConnID(nodeKey, ncKey);
+
+        List<Link> linkList = Arrays.asList(newLink("link1", newSourceTp("tp1"), newDestTp("dest")));
+        Topology topology = new TopologyBuilder().setLink(linkList).build();
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        doReturn(Futures.immediateCheckedFuture(Optional.of(topology))).when(mockTx)
+                .read(LogicalDatastoreType.OPERATIONAL, topologyIID);
+        setupStubbedSubmit(mockTx);
+
+        CountDownLatch deleteLatch = new CountDownLatch(1);
+        ArgumentCaptor<InstanceIdentifier> deletedLinkIDs =
+                ArgumentCaptor.forClass(InstanceIdentifier.class);
+        setupStubbedDeletes(mockTx, deletedLinkIDs, deleteLatch);
+
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onNodeConnectorUpdated(new NodeConnectorUpdatedBuilder().setNodeConnectorRef(
+                new NodeConnectorRef(invNodeConnID)).setId(ncKey.getId()).addAugmentation(
+                        FlowCapableNodeConnectorUpdated.class,
+                        new FlowCapableNodeConnectorUpdatedBuilder().setConfiguration(
+                                new PortConfig(true, true, true, true)).build()).build());
+
+        waitForDeletes(1, deleteLatch);
+
+        InstanceIdentifier<TerminationPoint> expTpPath = topologyIID.child(
+                Node.class, new NodeKey(new NodeId("node1"))).child(TerminationPoint.class,
+                        new TerminationPointKey(new TpId("tp1")));
+
+        verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(expTpPath),
+                any(TerminationPoint.class), eq(true));
+
+        assertDeletedIDs(new InstanceIdentifier[]{topologyIID.child(Link.class,
+                linkList.get(0).getKey())}, deletedLinkIDs);
+    }
+
+    @Test
+    public void testOnLinkDiscovered() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                sourceNodeKey = newInvNodeKey("sourceNode");
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+                sourceNodeConnKey = newInvNodeConnKey("sourceTP");
+        InstanceIdentifier<?> sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey);
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                destNodeKey = newInvNodeKey("destNode");
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+                destNodeConnKey = newInvNodeConnKey("destTP");
+        InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onLinkDiscovered(new LinkDiscoveredBuilder().setSource(
+                new NodeConnectorRef(sourceConnID)).setDestination(
+                        new NodeConnectorRef(destConnID)).build());
+
+        waitForSubmit(submitLatch);
+
+        ArgumentCaptor<Link> mergedNode = ArgumentCaptor.forClass(Link.class);
+        verify(mockTx).merge(eq(LogicalDatastoreType.OPERATIONAL), eq(topologyIID.child(
+                Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId())))),
+                mergedNode.capture(), eq(true));
+        assertEquals("Source node ID", "sourceNode",
+                mergedNode.getValue().getSource().getSourceNode().getValue());
+        assertEquals("Dest TP ID", "sourceTP",
+                mergedNode.getValue().getSource().getSourceTp().getValue());
+        assertEquals("Dest node ID", "destNode",
+                mergedNode.getValue().getDestination().getDestNode().getValue());
+        assertEquals("Dest TP ID", "destTP",
+                mergedNode.getValue().getDestination().getDestTp().getValue());
+    }
+
+    @Test
+    public void testOnLinkRemoved() {
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                sourceNodeKey = newInvNodeKey("sourceNode");
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+                sourceNodeConnKey = newInvNodeConnKey("sourceTP");
+        InstanceIdentifier<?> sourceConnID = newNodeConnID(sourceNodeKey, sourceNodeConnKey);
+
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                destNodeKey = newInvNodeKey("destNode");
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey
+                destNodeConnKey = newInvNodeConnKey("destTP");
+        InstanceIdentifier<?> destConnID = newNodeConnID(destNodeKey, destNodeConnKey);
+
+        ReadWriteTransaction mockTx = mock(ReadWriteTransaction.class);
+        CountDownLatch submitLatch = setupStubbedSubmit(mockTx);
+        doReturn(mockTx).when(mockTxChain).newReadWriteTransaction();
+
+        exporter.onLinkRemoved(new LinkRemovedBuilder().setSource(
+                new NodeConnectorRef(sourceConnID)).setDestination(
+                        new NodeConnectorRef(destConnID)).build());
+
+        waitForSubmit(submitLatch);
+
+        verify(mockTx).delete(LogicalDatastoreType.OPERATIONAL, topologyIID.child(
+                Link.class, new LinkKey(new LinkId(sourceNodeConnKey.getId()))));
+    }
+
+    private void verifyMockTx(ReadWriteTransaction mockTx) {
+        InOrder inOrder = inOrder(mockTx);
+        inOrder.verify(mockTx, atLeast(0)).submit();
+        inOrder.verify(mockTx, never()).delete(eq(LogicalDatastoreType.OPERATIONAL),
+              any(InstanceIdentifier.class));
+    }
+
+    @SuppressWarnings("rawtypes")
+    private void assertDeletedIDs(InstanceIdentifier[] expDeletedIIDs,
+            ArgumentCaptor<InstanceIdentifier> deletedLinkIDs) {
+        Set<InstanceIdentifier> actualIIDs = new HashSet<>(deletedLinkIDs.getAllValues());
+        for(InstanceIdentifier id: expDeletedIIDs) {
+            assertTrue("Missing expected deleted IID " + id, actualIIDs.contains(id));
+        }
+    }
+
+    private void setReadFutureAsync(final Topology topology,
+            final SettableFuture<Optional<Topology>> readFuture) {
+        new Thread() {
+            @Override
+            public void run() {
+                Uninterruptibles.sleepUninterruptibly(50, TimeUnit.MILLISECONDS);
+                readFuture.set(Optional.of(topology));
+            }
+
+        }.start();
+    }
+
+    private void waitForSubmit(CountDownLatch latch) {
+        assertEquals("Transaction submitted", true,
+                Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS));
+    }
+
+    private void waitForDeletes(int expDeleteCalls, final CountDownLatch latch) {
+        boolean done = Uninterruptibles.awaitUninterruptibly(latch, 5, TimeUnit.SECONDS);
+        if(!done) {
+            fail("Expected " + expDeleteCalls + " delete calls. Actual: " +
+                    (expDeleteCalls - latch.getCount()));
+        }
+    }
+
+    private CountDownLatch setupStubbedSubmit(ReadWriteTransaction mockTx) {
+        final CountDownLatch latch = new CountDownLatch(1);
+        doAnswer(new Answer<CheckedFuture<Void, TransactionCommitFailedException>>() {
+            @Override
+            public CheckedFuture<Void, TransactionCommitFailedException> answer(
+                                                            InvocationOnMock invocation) {
+                latch.countDown();
+                return Futures.immediateCheckedFuture(null);
+            }
+        }).when(mockTx).submit();
+
+        return latch;
+    }
+
+    @SuppressWarnings("rawtypes")
+    private void setupStubbedDeletes(ReadWriteTransaction mockTx,
+            ArgumentCaptor<InstanceIdentifier> deletedLinkIDs, final CountDownLatch latch) {
+        doAnswer(new Answer<Void>() {
+            @Override
+            public Void answer(InvocationOnMock invocation) {
+                latch.countDown();
+                return null;
+            }
+        }).when(mockTx).delete(eq(LogicalDatastoreType.OPERATIONAL), deletedLinkIDs.capture());
+    }
+
+    private org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey
+                                                                        newInvNodeKey(String id) {
+        org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey nodeKey =
+                new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey(
+                        new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.
+                                                                      rev130819.NodeId(id));
+        return nodeKey;
+    }
+
+    private NodeConnectorKey newInvNodeConnKey(String id) {
+        return new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey(
+                new org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.
+                                                               NodeConnectorId(id));
+    }
+
+    private KeyedInstanceIdentifier<NodeConnector, NodeConnectorKey> newNodeConnID(
+            org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.NodeKey nodeKey,
+            org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.node.NodeConnectorKey ncKey) {
+        return InstanceIdentifier.create(Nodes.class).child(
+                org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.nodes.Node.class,
+                nodeKey).child(org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.
+                        rev130819.node.NodeConnector.class, ncKey);
+    }
+
+    private Link newLink(String id, Source source, Destination dest) {
+        return new LinkBuilder().setLinkId(new LinkId(id))
+                .setSource(source).setDestination(dest).build();
+    }
+
+    private Destination newDestTp(String id) {
+        return new DestinationBuilder().setDestTp(new TpId(id)).build();
+    }
+
+    private Source newSourceTp(String id) {
+        return new SourceBuilder().setSourceTp(new TpId(id)).build();
+    }
+
+    private Destination newDestNode(String id) {
+        return new DestinationBuilder().setDestNode(new NodeId(id)).build();
+    }
+
+    private Source newSourceNode(String id) {
+        return new SourceBuilder().setSourceNode(new NodeId(id)).build();
+    }
+}
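
The test above coordinates with the exporter's background processing thread by stubbing asynchronous mock calls to count down CountDownLatches and then awaiting them. A minimal standalone sketch of that synchronization pattern, assuming Mockito and Guava on the classpath; the AsyncThing interface and class name are invented for illustration:

import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import com.google.common.util.concurrent.Uninterruptibles;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class LatchStubSketch {
    // Hypothetical asynchronous collaborator, standing in for ReadWriteTransaction.
    public interface AsyncThing {
        void submit();
    }

    public static void main(String[] args) {
        final CountDownLatch submitLatch = new CountDownLatch(1);
        final AsyncThing mockThing = mock(AsyncThing.class);

        // Count the latch down whenever submit() is invoked, from any thread.
        doAnswer(new Answer<Void>() {
            @Override
            public Void answer(InvocationOnMock invocation) {
                submitLatch.countDown();
                return null;
            }
        }).when(mockThing).submit();

        // Exercise the mock from a background thread, as the exporter test does.
        new Thread(new Runnable() {
            @Override
            public void run() {
                mockThing.submit();
            }
        }).start();

        // The main thread blocks until submit() has actually happened (or times out).
        boolean submitted = Uninterruptibles.awaitUninterruptibly(submitLatch, 5, TimeUnit.SECONDS);
        System.out.println("submit observed: " + submitted);
    }
}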
index 8720c654c61f993f09d6503305fdc471d4065a94..1d12292edb2624ee12b7d1c469c44102ab617dfc 100644 (file)
@@ -9,6 +9,7 @@ package org.opendaylight.controller.netconf.confignetconfconnector.mapping.attri
 
 import com.google.common.base.Preconditions;
 
+import com.google.common.base.Strings;
 import java.util.List;
 import java.util.Map;
 
@@ -52,7 +53,7 @@ public class ObjectNameAttributeReadingStrategy extends AbstractAttributeReading
     public static String checkPrefixAndExtractServiceName(XmlElement typeElement, Map.Entry<String, String> prefixNamespace) throws NetconfDocumentedException {
         String serviceName = typeElement.getTextContent();
         // FIXME: comparing Entry with String:
-        Preconditions.checkState(!prefixNamespace.equals(""), "Service %s value not prefixed with namespace",
+        Preconditions.checkState(!Strings.isNullOrEmpty(prefixNamespace.getKey()), "Service %s value not prefixed with namespace",
                 XmlNetconfConstants.TYPE_KEY);
         String prefix = prefixNamespace.getKey() + PREFIX_SEPARATOR;
         Preconditions.checkState(serviceName.startsWith(prefix),
index 8e7ba708c6cea71ba46400f99fb765a2113bf762..94a90a782ebbd82e5a35643d27e15f9607248537 100644 (file)
@@ -47,11 +47,11 @@ public class SimpleIdentityRefAttributeReadingStrategy extends SimpleAttributeRe
         Date revision = null;
         Map<Date, EditConfig.IdentityMapping> revisions = identityMap.get(namespace);
         if(revisions.keySet().size() > 1) {
-            for (Date date : revisions.keySet()) {
-                if(revisions.get(date).containsIdName(localName)) {
+            for (Map.Entry<Date, EditConfig.IdentityMapping> revisionToIdentityEntry : revisions.entrySet()) {
+                if(revisionToIdentityEntry.getValue().containsIdName(localName)) {
                     Preconditions.checkState(revision == null, "Duplicate identity %s, in namespace %s, with revisions: %s, %s detected. Cannot map attribute",
-                            localName, namespace, revision, date);
-                    revision = date;
+                            localName, namespace, revision, revisionToIdentityEntry.getKey());
+                    revision = revisionToIdentityEntry.getKey();
                 }
             }
         } else {
index 5d41b784f5cdaa4ff264774b05069ad6e83f3ff3..773e4ee933c147d6206394b8da51fd4f8094358f 100644 (file)
@@ -60,11 +60,11 @@ public class Config {
 
         Map<String, Map<String, Collection<ObjectName>>> retVal = Maps.newLinkedHashMap();
 
-        for (String namespace : configs.keySet()) {
+        for (Entry<String, Map<String, ModuleConfig>> namespaceToModuleToConfigEntry : configs.entrySet()) {
 
             Map<String, Collection<ObjectName>> innerRetVal = Maps.newHashMap();
 
-            for (Entry<String, ModuleConfig> mbeEntry : configs.get(namespace).entrySet()) {
+            for (Entry<String, ModuleConfig> mbeEntry : namespaceToModuleToConfigEntry.getValue().entrySet()) {
 
                 String moduleName = mbeEntry.getKey();
                 Collection<ObjectName> instances = moduleToInstances.get(moduleName);
@@ -80,7 +80,7 @@ public class Config {
 
             }
 
-            retVal.put(namespace, innerRetVal);
+            retVal.put(namespaceToModuleToConfigEntry.getKey(), innerRetVal);
         }
         return retVal;
     }
@@ -107,18 +107,18 @@ public class Config {
 
         Element modulesElement = XmlUtil.createElement(document, XmlNetconfConstants.MODULES_KEY, Optional.of(XmlNetconfConstants.URN_OPENDAYLIGHT_PARAMS_XML_NS_YANG_CONTROLLER_CONFIG));
         dataElement.appendChild(modulesElement);
-        for (String moduleNamespace : moduleToInstances.keySet()) {
-            for (Entry<String, Collection<ObjectName>> moduleMappingEntry : moduleToInstances.get(moduleNamespace)
+        for (Entry<String, Map<String, Collection<ObjectName>>> moduleToInstanceEntry : moduleToInstances.entrySet()) {
+            for (Entry<String, Collection<ObjectName>> moduleMappingEntry : moduleToInstanceEntry.getValue()
                     .entrySet()) {
 
-                ModuleConfig mapping = moduleConfigs.get(moduleNamespace).get(moduleMappingEntry.getKey());
+                ModuleConfig mapping = moduleConfigs.get(moduleToInstanceEntry.getKey()).get(moduleMappingEntry.getKey());
 
                 if (moduleMappingEntry.getValue().isEmpty()) {
                     continue;
                 }
 
                 for (ObjectName objectName : moduleMappingEntry.getValue()) {
-                    modulesElement.appendChild(mapping.toXml(objectName, document, moduleNamespace));
+                    modulesElement.appendChild(mapping.toXml(objectName, document, moduleToInstanceEntry.getKey()));
                 }
 
             }
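
Several of the hunks above and below (Config, ServiceRegistryWrapper, Services, EditConfig, Get, RuntimeRpc) apply the same refactoring: iterating entrySet() instead of keySet() followed by a get() per key. A minimal self-contained illustration of the before/after shape (the map contents here are invented):

import java.util.LinkedHashMap;
import java.util.Map;

public class EntrySetIterationSketch {
    public static void main(String[] args) {
        Map<String, Integer> counts = new LinkedHashMap<>();
        counts.put("netconf", 3);
        counts.put("md-sal", 5);

        // Before: one extra map lookup per key on top of the iteration itself.
        for (String key : counts.keySet()) {
            System.out.println(key + " -> " + counts.get(key));
        }

        // After: each entry already carries both key and value, no extra lookup.
        for (Map.Entry<String, Integer> entry : counts.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}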
index 2b363ea153960eeb98754855ffa54473e43fad14..8c2c74f2ac58d1c2144a1e8194cb46e15eb279b9 100644 (file)
@@ -24,7 +24,6 @@ public class ServiceRegistryWrapper {
         this.configServiceRefRegistry = configServiceRefRegistry;
     }
 
-
     public ObjectName getByServiceAndRefName(String namespace, String serviceName, String refName) {
         Map<String, Map<String, String>> serviceNameToRefNameToInstance = getMappedServices().get(namespace);
 
@@ -61,13 +60,13 @@ public class ServiceRegistryWrapper {
         Map<String, Map<String, Map<String, String>>> retVal = Maps.newHashMap();
 
         Map<String, Map<String, ObjectName>> serviceMapping = configServiceRefRegistry.getServiceMapping();
-        for (String serviceQName : serviceMapping.keySet()){
-            for (String refName : serviceMapping.get(serviceQName).keySet()) {
+        for (Map.Entry<String, Map<String, ObjectName>> qNameToRefNameEntry : serviceMapping.entrySet()){
+            for (String refName : qNameToRefNameEntry.getValue().keySet()) {
 
-                ObjectName on = serviceMapping.get(serviceQName).get(refName);
+                ObjectName on = qNameToRefNameEntry.getValue().get(refName);
                 Services.ServiceInstance si = Services.ServiceInstance.fromObjectName(on);
 
-                QName qname = QName.create(serviceQName);
+                QName qname = QName.create(qNameToRefNameEntry.getKey());
                 String namespace = qname.getNamespace().toString();
                 Map<String, Map<String, String>> serviceToRefs = retVal.get(namespace);
                 if(serviceToRefs==null) {
index 59a1d4fe7141bbe54a2dc770a4d2c789819bf52d..bdb4c1b067ff19947a01484165ba25563ba53e0a 100644 (file)
@@ -133,9 +133,9 @@ public final class Services {
         Element root = XmlUtil.createElement(document, XmlNetconfConstants.SERVICES_KEY, Optional.of(XmlNetconfConstants.URN_OPENDAYLIGHT_PARAMS_XML_NS_YANG_CONTROLLER_CONFIG));
 
         Map<String, Map<String, Map<String, String>>> mappedServices = serviceRegistryWrapper.getMappedServices();
-        for (String namespace : mappedServices.keySet()) {
+        for (Entry<String, Map<String, Map<String, String>>> namespaceToRefEntry : mappedServices.entrySet()) {
 
-            for (Entry<String, Map<String, String>> serviceEntry : mappedServices.get(namespace).entrySet()) {
+            for (Entry<String, Map<String, String>> serviceEntry : namespaceToRefEntry.getValue().entrySet()) {
                 // service belongs to config.yang namespace
                 Element serviceElement = XmlUtil.createElement(document, SERVICE_KEY, Optional.<String>absent());
                 root.appendChild(serviceElement);
@@ -143,7 +143,7 @@ public final class Services {
                 // type belongs to config.yang namespace
                 String serviceType = serviceEntry.getKey();
                 Element typeElement = XmlUtil.createTextElementWithNamespacedContent(document, XmlNetconfConstants.TYPE_KEY,
-                        XmlNetconfConstants.PREFIX, namespace, serviceType);
+                        XmlNetconfConstants.PREFIX, namespaceToRefEntry.getKey(), serviceType);
 
                 serviceElement.appendChild(typeElement);
 
index 543a2c4a630d90de73dbf7aa0f837bf7dd2328f7..c22dcfe67bfb00233758b3b05cdc557ae5935a32 100644 (file)
@@ -148,21 +148,20 @@ public class EditConfig extends AbstractConfigNetconfOperation {
         Map<String, Map<String, Map<String, Services.ServiceInstance>>> namespaceToServiceNameToRefNameToInstance = services
                 .getNamespaceToServiceNameToRefNameToInstance();
 
-        for (String serviceNamespace : namespaceToServiceNameToRefNameToInstance.keySet()) {
-            for (String serviceName : namespaceToServiceNameToRefNameToInstance.get(serviceNamespace).keySet()) {
+        for (Map.Entry<String, Map<String, Map<String, Services.ServiceInstance>>> namespaceToServiceToRefEntry : namespaceToServiceNameToRefNameToInstance.entrySet()) {
+            for (Map.Entry<String, Map<String, Services.ServiceInstance>> serviceToRefEntry : namespaceToServiceToRefEntry.getValue().entrySet()) {
 
-                String qnameOfService = getQname(ta, serviceNamespace, serviceName);
-                Map<String, Services.ServiceInstance> refNameToInstance = namespaceToServiceNameToRefNameToInstance
-                        .get(serviceNamespace).get(serviceName);
+                String qnameOfService = getQname(ta, namespaceToServiceToRefEntry.getKey(), serviceToRefEntry.getKey());
+                Map<String, Services.ServiceInstance> refNameToInstance = serviceToRefEntry.getValue();
 
-                for (String refName : refNameToInstance.keySet()) {
-                    ObjectName on = refNameToInstance.get(refName).getObjectName(ta.getTransactionName());
+                for (Map.Entry<String, Services.ServiceInstance> refNameToServiceEntry : refNameToInstance.entrySet()) {
+                    ObjectName on = refNameToServiceEntry.getValue().getObjectName(ta.getTransactionName());
                     try {
-                        ObjectName saved = ta.saveServiceReference(qnameOfService, refName, on);
+                        ObjectName saved = ta.saveServiceReference(qnameOfService, refNameToServiceEntry.getKey(), on);
                         logger.debug("Saving service {} with on {} under name {} with service on {}", qnameOfService,
-                                on, refName, saved);
+                                on, refNameToServiceEntry.getKey(), saved);
                     } catch (InstanceNotFoundException e) {
-                        throw new NetconfDocumentedException(String.format("Unable to save ref name " + refName + " for instance " + on, e),
+                        throw new NetconfDocumentedException(String.format("Unable to save ref name " + refNameToServiceEntry.getKey() + " for instance " + on, e),
                                 ErrorType.application,
                                 ErrorTag.operation_failed,
                                 ErrorSeverity.error);
@@ -271,18 +270,18 @@ public class EditConfig extends AbstractConfigNetconfOperation {
 
         Map<String, Map<String, ModuleConfig>> namespaceToModuleNameToModuleConfig = Maps.newHashMap();
 
-        for (String namespace : mBeanEntries.keySet()) {
-            for (Map.Entry<String, ModuleMXBeanEntry> moduleNameToMbe : mBeanEntries.get(namespace).entrySet()) {
+        for (Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleToMbe : mBeanEntries.entrySet()) {
+            for (Map.Entry<String, ModuleMXBeanEntry> moduleNameToMbe : namespaceToModuleToMbe.getValue().entrySet()) {
                 String moduleName = moduleNameToMbe.getKey();
                 ModuleMXBeanEntry moduleMXBeanEntry = moduleNameToMbe.getValue();
 
                 ModuleConfig moduleConfig = new ModuleConfig(moduleName,
                         new InstanceConfig(configRegistryClient,moduleMXBeanEntry.getAttributes(), moduleMXBeanEntry.getNullableDummyContainerName()));
 
-                Map<String, ModuleConfig> moduleNameToModuleConfig = namespaceToModuleNameToModuleConfig.get(namespace);
+                Map<String, ModuleConfig> moduleNameToModuleConfig = namespaceToModuleNameToModuleConfig.get(namespaceToModuleToMbe.getKey());
                 if(moduleNameToModuleConfig == null) {
                     moduleNameToModuleConfig = Maps.newHashMap();
-                    namespaceToModuleNameToModuleConfig.put(namespace, moduleNameToModuleConfig);
+                    namespaceToModuleNameToModuleConfig.put(namespaceToModuleToMbe.getKey(), moduleNameToModuleConfig);
                 }
 
                 moduleNameToModuleConfig.put(moduleName, moduleConfig);
index fc95046dfdbef94e3e139480cae950c7010a83f9..9155bb95d2e73e812c46c15b3ff973cabb92875f 100644 (file)
@@ -54,13 +54,13 @@ public class Get extends AbstractConfigNetconfOperation {
             Map<String, Map<String, ModuleMXBeanEntry>> mBeanEntries) {
         Map<String, Map<String, ModuleRuntime>> retVal = Maps.newHashMap();
 
-        for (String namespace : mBeanEntries.keySet()) {
+        for (Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleEntry : mBeanEntries.entrySet()) {
 
             Map<String, ModuleRuntime> innerMap = Maps.newHashMap();
-            Map<String, ModuleMXBeanEntry> entriesFromNamespace = mBeanEntries.get(namespace);
-            for (String module : entriesFromNamespace.keySet()) {
+            Map<String, ModuleMXBeanEntry> entriesFromNamespace = namespaceToModuleEntry.getValue();
+            for (Map.Entry<String, ModuleMXBeanEntry> moduleToMXEntry : entriesFromNamespace.entrySet()) {
 
-                ModuleMXBeanEntry mbe = entriesFromNamespace.get(module);
+                ModuleMXBeanEntry mbe = moduleToMXEntry.getValue();
 
                 Map<RuntimeBeanEntry, InstanceConfig> cache = Maps.newHashMap();
                 RuntimeBeanEntry root = null;
@@ -77,10 +77,10 @@ public class Get extends AbstractConfigNetconfOperation {
 
                 InstanceRuntime rootInstanceRuntime = createInstanceRuntime(root, cache);
                 ModuleRuntime moduleRuntime = new ModuleRuntime(rootInstanceRuntime);
-                innerMap.put(module, moduleRuntime);
+                innerMap.put(moduleToMXEntry.getKey(), moduleRuntime);
             }
 
-            retVal.put(namespace, innerMap);
+            retVal.put(namespaceToModuleEntry.getKey(), innerMap);
         }
         return retVal;
     }
index 21021fec55661f69cd2c1e19325f0dd651e07193..0d9c61bad7b17b961294ce3562a7abca56f4d851 100644 (file)
@@ -81,8 +81,7 @@ public class RuntimeRpc extends AbstractConfigNetconfOperation {
         final String[] signature = new String[attributes.size()];
 
         int i = 0;
-        for (final String attrName : attributes.keySet()) {
-            final AttributeConfigElement attribute = attributes.get(attrName);
+        for (final AttributeConfigElement attribute : attributes.values()) {
             final Optional<?> resolvedValueOpt = attribute.getResolvedValue();
 
             params[i] = resolvedValueOpt.isPresent() ? resolvedValueOpt.get() : attribute.getResolvedDefaultValue();
@@ -248,23 +247,23 @@ public class RuntimeRpc extends AbstractConfigNetconfOperation {
 
         final Map<String, Map<String, ModuleRpcs>> map = Maps.newHashMap();
 
-        for (final String namespace : mBeanEntries.keySet()) {
+        for (final Map.Entry<String, Map<String, ModuleMXBeanEntry>> namespaceToModuleEntry : mBeanEntries.entrySet()) {
 
-            Map<String, ModuleRpcs> namespaceToModules = map.get(namespace);
+            Map<String, ModuleRpcs> namespaceToModules = map.get(namespaceToModuleEntry.getKey());
             if (namespaceToModules == null) {
                 namespaceToModules = Maps.newHashMap();
-                map.put(namespace, namespaceToModules);
+                map.put(namespaceToModuleEntry.getKey(), namespaceToModules);
             }
 
-            for (final String moduleName : mBeanEntries.get(namespace).keySet()) {
+            for (final Map.Entry<String, ModuleMXBeanEntry> moduleEntry : namespaceToModuleEntry.getValue().entrySet()) {
 
-                ModuleRpcs rpcMapping = namespaceToModules.get(moduleName);
+                ModuleRpcs rpcMapping = namespaceToModules.get(moduleEntry.getKey());
                 if (rpcMapping == null) {
                     rpcMapping = new ModuleRpcs();
-                    namespaceToModules.put(moduleName, rpcMapping);
+                    namespaceToModules.put(moduleEntry.getKey(), rpcMapping);
                 }
 
-                final ModuleMXBeanEntry entry = mBeanEntries.get(namespace).get(moduleName);
+                final ModuleMXBeanEntry entry = moduleEntry.getValue();
 
                 for (final RuntimeBeanEntry runtimeEntry : entry.getRuntimeBeans()) {
                     rpcMapping.addNameMapping(runtimeEntry);
index beab62e997457055ce56ddbf646d40292905bf6a..11d2e748bfff0f2c3f4060e957a61b06d5e7e624 100644 (file)
@@ -55,7 +55,7 @@ public class TransactionProvider implements AutoCloseable {
         allOpenedTransactions.clear();
     }
 
-    public Optional<ObjectName> getTransaction() {
+    public synchronized Optional<ObjectName> getTransaction() {
 
         if (transaction == null){
             return Optional.absent();
index 40a15be70616217d81529b003f899236865d8ac8..d72c26cb775dd76f34645d1d5bc533eb4bc082fe 100644 (file)
@@ -34,18 +34,20 @@ public class ConfigPersisterNotificationHandler implements Closeable {
 
     private static final Logger logger = LoggerFactory.getLogger(ConfigPersisterNotificationHandler.class);
     private final MBeanServerConnection mBeanServerConnection;
-    private final ConfigPersisterNotificationListener listener;
+    private final NotificationListener listener;
 
 
-    public ConfigPersisterNotificationHandler(MBeanServerConnection mBeanServerConnection,
-                                              Persister persisterAggregator) {
+    public ConfigPersisterNotificationHandler(final MBeanServerConnection mBeanServerConnection, final Persister persisterAggregator) {
+        this(mBeanServerConnection, new ConfigPersisterNotificationListener(persisterAggregator));
+    }
+
+    public ConfigPersisterNotificationHandler(final MBeanServerConnection mBeanServerConnection, final NotificationListener notificationListener) {
         this.mBeanServerConnection = mBeanServerConnection;
-        listener = new ConfigPersisterNotificationListener(persisterAggregator);
+        this.listener = notificationListener;
         registerAsJMXListener(mBeanServerConnection, listener);
-
     }
 
-    private static void registerAsJMXListener(MBeanServerConnection mBeanServerConnection, ConfigPersisterNotificationListener listener) {
+    private static void registerAsJMXListener(final MBeanServerConnection mBeanServerConnection, final NotificationListener listener) {
         logger.trace("Called registerAsJMXListener");
         try {
             mBeanServerConnection.addNotificationListener(DefaultCommitOperationMXBean.OBJECT_NAME, listener, null, null);
@@ -57,12 +59,12 @@ public class ConfigPersisterNotificationHandler implements Closeable {
     @Override
     public synchronized void close() {
         // unregister from JMX
-        ObjectName on = DefaultCommitOperationMXBean.OBJECT_NAME;
+        final ObjectName on = DefaultCommitOperationMXBean.OBJECT_NAME;
         try {
             if (mBeanServerConnection.isRegistered(on)) {
                 mBeanServerConnection.removeNotificationListener(on, listener);
             }
-        } catch (Exception e) {
+        } catch (final Exception e) {
             logger.warn("Unable to unregister {} as listener for {}", listener, on, e);
         }
     }
@@ -73,12 +75,12 @@ class ConfigPersisterNotificationListener implements NotificationListener {
 
     private final Persister persisterAggregator;
 
-    ConfigPersisterNotificationListener(Persister persisterAggregator) {
+    ConfigPersisterNotificationListener(final Persister persisterAggregator) {
         this.persisterAggregator = persisterAggregator;
     }
 
     @Override
-    public void handleNotification(Notification notification, Object handback) {
+    public void handleNotification(final Notification notification, final Object handback) {
         if (!(notification instanceof NetconfJMXNotification))
             return;
 
@@ -89,7 +91,7 @@ class ConfigPersisterNotificationListener implements NotificationListener {
         if (notification instanceof CommitJMXNotification) {
             try {
                 handleAfterCommitNotification((CommitJMXNotification) notification);
-            } catch (Exception e) {
+            } catch (final Exception e) {
                 // log exceptions from notification Handler here since
                 // notificationBroadcastSupport logs only DEBUG level
                 logger.warn("Failed to handle notification {}", notification, e);
@@ -105,7 +107,7 @@ class ConfigPersisterNotificationListener implements NotificationListener {
             persisterAggregator.persistConfig(new CapabilityStrippingConfigSnapshotHolder(notification.getConfigSnapshot(),
                     notification.getCapabilities()));
             logger.trace("Configuration persisted successfully");
-        } catch (IOException e) {
+        } catch (final IOException e) {
             throw new RuntimeException("Unable to persist configuration snapshot", e);
         }
     }
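
The ConfigPersisterNotificationHandler change above introduces a second constructor that accepts any NotificationListener, with the Persister-based constructor delegating to it. A minimal standalone sketch of this delegating-constructor pattern, using only JDK javax.management types (the class and method names here are invented for illustration, not taken from the patch):

import javax.management.Notification;
import javax.management.NotificationListener;

public class HandlerInjectionSketch {
    private final NotificationListener listener;

    // Convenience path: build the default listener and delegate.
    public HandlerInjectionSketch() {
        this(new NotificationListener() {
            @Override
            public void handleNotification(Notification notification, Object handback) {
                System.out.println("default listener got " + notification.getType());
            }
        });
    }

    // Injection path: accept any listener, e.g. a mock in a unit test.
    public HandlerInjectionSketch(NotificationListener listener) {
        this.listener = listener;
    }

    public void fire() {
        listener.handleNotification(new Notification("example", this, 1L), null);
    }

    public static void main(String[] args) {
        new HandlerInjectionSketch().fire();
    }
}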
index 5f311b5232ed676263925ce6af94b779c662df2e..b346522f4498b587c5550f7898f6a9639b48f395 100644 (file)
@@ -10,6 +10,9 @@ package org.opendaylight.controller.netconf.persist.impl;
 
 import static com.google.common.base.Preconditions.checkNotNull;
 
+import com.google.common.base.Function;
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.Collections2;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.Collection;
@@ -23,10 +26,9 @@ import java.util.TreeMap;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
-
+import javax.annotation.Nonnull;
 import javax.annotation.concurrent.Immutable;
 import javax.management.MBeanServerConnection;
-
 import org.opendaylight.controller.config.api.ConflictingVersionException;
 import org.opendaylight.controller.config.persist.api.ConfigPusher;
 import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
@@ -49,10 +51,6 @@ import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.xml.sax.SAXException;
 
-import com.google.common.base.Function;
-import com.google.common.base.Stopwatch;
-import com.google.common.collect.Collections2;
-
 @Immutable
 public class ConfigPusherImpl implements ConfigPusher {
     private static final Logger logger = LoggerFactory.getLogger(ConfigPusherImpl.class);
@@ -200,7 +198,7 @@ public class ConfigPusherImpl implements ConfigPusher {
     private static Set<String> computeNotFoundCapabilities(Set<String> expectedCapabilities, NetconfOperationService serviceCandidate) {
         Collection<String> actual = Collections2.transform(serviceCandidate.getCapabilities(), new Function<Capability, String>() {
             @Override
-            public String apply(Capability input) {
+            public String apply(@Nonnull final Capability input) {
                 return input.getCapabilityUri();
             }
         });
index a0e7974b942951c521305ae4726baa4a5a74d74a..7e68ac18757e194d3c4b405f69c6a7f2453c9044 100644 (file)
@@ -147,8 +147,8 @@ public final class PersisterAggregator implements Persister {
     public void persistConfig(ConfigSnapshotHolder holder) throws IOException {
         for (PersisterWithConfiguration persisterWithConfiguration: persisterWithConfigurations){
             if (!persisterWithConfiguration.readOnly){
-                logger.debug("Calling {}.persistConfig",persisterWithConfiguration.storage);
-                persisterWithConfiguration.storage.persistConfig(holder);
+                logger.debug("Calling {}.persistConfig", persisterWithConfiguration.getStorage());
+                persisterWithConfiguration.getStorage().persistConfig(holder);
             }
         }
     }
diff --git a/opendaylight/netconf/config-persister-impl/src/test/java/org/opendaylight/controller/netconf/persist/impl/ConfigPersisterNotificationHandlerTest.java b/opendaylight/netconf/config-persister-impl/src/test/java/org/opendaylight/controller/netconf/persist/impl/ConfigPersisterNotificationHandlerTest.java
new file mode 100644 (file)
index 0000000..f16083e
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.persist.impl;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import javax.management.MBeanServerConnection;
+import javax.management.NotificationFilter;
+import javax.management.NotificationListener;
+import javax.management.ObjectName;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.config.persist.api.Persister;
+
+public class ConfigPersisterNotificationHandlerTest {
+
+    @Mock
+    private MBeanServerConnection mBeanServer;
+    @Mock
+    private Persister notificationListener;
+
+    @Before
+    public void setUp() throws Exception {
+        MockitoAnnotations.initMocks(this);
+        doNothing().when(mBeanServer).addNotificationListener(any(ObjectName.class), any(NotificationListener.class),
+                any(NotificationFilter.class), anyObject());
+    }
+
+    @Test
+    public void testNotificationHandler() throws Exception {
+        doReturn(true).when(mBeanServer).isRegistered(any(ObjectName.class));
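+        // Removal of the listener is stubbed to throw; close() is still expected to complete without propagating the exception.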
+        doThrow(Exception.class).when(mBeanServer).removeNotificationListener(any(ObjectName.class), any(NotificationListener.class));
+
+        final ConfigPersisterNotificationHandler testedHandler = new ConfigPersisterNotificationHandler(mBeanServer, notificationListener);
+        verify(mBeanServer).addNotificationListener(any(ObjectName.class), any(NotificationListener.class),
+                any(NotificationFilter.class), anyObject());
+
+        testedHandler.close();
+        verify(mBeanServer).removeNotificationListener(any(ObjectName.class), any(NotificationListener.class));
+    }
+
+    @Test
+    public void testNotificationHandlerCloseNotRegistered() throws Exception {
+        doReturn(false).when(mBeanServer).isRegistered(any(ObjectName.class));
+
+        final ConfigPersisterNotificationHandler testedHandler = new ConfigPersisterNotificationHandler(mBeanServer, notificationListener);
+
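+        // close() must not try to remove a listener that was never registered.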
+        testedHandler.close();
+        verify(mBeanServer, times(0)).removeNotificationListener(any(ObjectName.class), any(NotificationListener.class));
+    }
+}
diff --git a/opendaylight/netconf/config-persister-impl/src/test/java/org/opendaylight/controller/netconf/persist/impl/ConfigPersisterNotificationListenerTest.java b/opendaylight/netconf/config-persister-impl/src/test/java/org/opendaylight/controller/netconf/persist/impl/ConfigPersisterNotificationListenerTest.java
new file mode 100644 (file)
index 0000000..bf031f1
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.persist.impl;
+
+import java.util.Collections;
+
+import javax.management.Notification;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
+import org.opendaylight.controller.config.persist.api.Persister;
+import org.opendaylight.controller.netconf.api.jmx.CommitJMXNotification;
+import org.opendaylight.controller.netconf.api.jmx.NetconfJMXNotification;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+
+import com.google.common.collect.Lists;
+
+public class ConfigPersisterNotificationListenerTest {
+
+    @Mock
+    private Persister mockPersister;
+    private PersisterAggregator persisterAggregator;
+
+    @Mock
+    private NetconfJMXNotification unknownNetconfNotif;
+    @Mock
+    private CommitJMXNotification commitNetconfNotif;
+    @Mock
+    private Notification unknownNotif;
+
+    @Before
+    public void setUp() throws Exception {
+        MockitoAnnotations.initMocks(this);
+
+        Mockito.doNothing().when(mockPersister).persistConfig(Matchers.any(ConfigSnapshotHolder.class));
+        Mockito.doReturn("persister").when(mockPersister).toString();
+        final PersisterAggregator.PersisterWithConfiguration withCfg = new PersisterAggregator.PersisterWithConfiguration(mockPersister, false);
+        persisterAggregator = new PersisterAggregator(Lists.newArrayList(withCfg));
+
+        Mockito.doReturn("netconfUnknownNotification").when(unknownNetconfNotif).toString();
+        Mockito.doReturn("netconfCommitNotification").when(commitNetconfNotif).toString();
+
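+        // The commit notification carries a dummy config snapshot and no capabilities.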
+        Mockito.doReturn(XmlUtil.readXmlToElement("<config-snapshot/>")).when(commitNetconfNotif).getConfigSnapshot();
+        Mockito.doReturn(Collections.emptySet()).when(commitNetconfNotif).getCapabilities();
+    }
+
+    @Test
+    public void testNotificationListenerUnknownNotification() throws Exception {
+        final ConfigPersisterNotificationListener testedListener = new ConfigPersisterNotificationListener(persisterAggregator);
+        testedListener.handleNotification(unknownNotif, null);
+        Mockito.verifyZeroInteractions(mockPersister);
+    }
+
+    @Test
+    public void testNotificationListenerUnknownNetconfNotification() throws Exception {
+        final ConfigPersisterNotificationListener testedListener = new ConfigPersisterNotificationListener(persisterAggregator);
+        try {
+            testedListener.handleNotification(unknownNetconfNotif, null);
+            Assert.fail("Unknown netconf notification should fail");
+        } catch (final IllegalStateException e) {
+            Mockito.verifyZeroInteractions(mockPersister);
+        }
+    }
+
+    @Test
+    public void testNotificationListenerCommitNetconfNotification() throws Exception {
+        final ConfigPersisterNotificationListener testedListener = new ConfigPersisterNotificationListener(persisterAggregator);
+        testedListener.handleNotification(commitNetconfNotif, null);
+        Mockito.verify(mockPersister).persistConfig(Matchers.any(ConfigSnapshotHolder.class));
+    }
+}
index acea75a7432d37dbede7c219ed961784b1f63b86..cd646aeb079100a72a230f9d62624504a72a5ad2 100644 (file)
@@ -8,6 +8,8 @@
 
 package org.opendaylight.controller.netconf.persist.impl;
 
+import com.google.common.collect.Lists;
+
 import org.junit.Test;
 import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
 import org.opendaylight.controller.config.persist.api.Persister;
@@ -87,6 +89,22 @@ public class PersisterAggregatorTest {
         assertEquals(1, DummyAdapter.props);
     }
 
+    @Test
+    public void testNoopAdapter() throws Exception {
+        final NoOpStorageAdapter noOpStorageAdapter = new NoOpStorageAdapter();
+        final PersisterAggregator persisterAggregator =
+                new PersisterAggregator(Lists.newArrayList(new PersisterWithConfiguration(noOpStorageAdapter, false)));
+
+        noOpStorageAdapter.instantiate(null);
+
+        persisterAggregator.persistConfig(null);
+        persisterAggregator.loadLastConfigs();
+        persisterAggregator.persistConfig(null);
+        persisterAggregator.loadLastConfigs();
+
+        noOpStorageAdapter.close();
+    }
+
     @Test
     public void testLoadFromPropertyFile() throws Exception {
         PersisterAggregator persisterAggregator = PersisterAggregator.createFromProperties(loadFile("test2.properties"));
index bf27ed6f4dbadab127dc4e3e876676018df2a180..6bb67d0681fc7aa559ba888d057a260cc82b24d6 100644 (file)
       <groupId>${project.groupId}</groupId>
       <artifactId>netconf-util</artifactId>
     </dependency>
+      <dependency>
+          <groupId>${project.groupId}</groupId>
+          <artifactId>netconf-util</artifactId>
+          <type>test-jar</type>
+          <scope>test</scope>
+      </dependency>
     <dependency>
       <groupId>com.google.guava</groupId>
       <artifactId>guava</artifactId>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-api</artifactId>
     </dependency>
+      <dependency>
+          <groupId>org.opendaylight.yangtools</groupId>
+          <artifactId>mockito-configuration</artifactId>
+      </dependency>
   </dependencies>
 
   <build>
index e2ac49c3efed823b3899fa952e6742eb2bcade95..cbbee1f65508353b117bbd21db22b523b4244694 100644 (file)
@@ -8,6 +8,7 @@
 
 package org.opendaylight.controller.netconf.client;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 
 import io.netty.channel.Channel;
@@ -70,8 +71,8 @@ public class NetconfClientSessionNegotiator extends
             logger.debug("Netconf session {} should use exi.", session);
             NetconfStartExiMessage startExiMessage = (NetconfStartExiMessage) sessionPreferences.getStartExiMessage();
             tryToInitiateExi(session, startExiMessage);
-        // Exi is not supported, release session immediately
         } else {
+            // Exi is not supported, release session immediately
             logger.debug("Netconf session {} isn't capable of using exi.", session);
             negotiationSuccessful(session);
         }
@@ -117,6 +118,7 @@ public class NetconfClientSessionNegotiator extends
 
     private long extractSessionId(final Document doc) {
         final Node sessionIdNode = (Node) XmlUtil.evaluateXPath(sessionIdXPath, doc, XPathConstants.NODE);
+        Preconditions.checkState(sessionIdNode != null, "Session id not received from server");
         String textContent = sessionIdNode.getTextContent();
         if (textContent == null || textContent.equals("")) {
             throw new IllegalStateException("Session id not received from server");
diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientConfigurationTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientConfigurationTest.java
new file mode 100644 (file)
index 0000000..592cdad
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+
+import java.net.InetSocketAddress;
+
+public class NetconfClientConfigurationTest {
+    @Test
+    public void testNetconfClientConfiguration() throws Exception {
+        Long timeout = 200L;
+        NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+        NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+        InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+        ReconnectStrategy strategy = Mockito.mock(ReconnectStrategy.class);
+        AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
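+        // Build an SSH client configuration and verify that every value is exposed through its getter.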
+        NetconfClientConfiguration cfg = NetconfClientConfigurationBuilder.create().
+                withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+                withAddress(address).
+                withConnectionTimeoutMillis(timeout).
+                withReconnectStrategy(strategy).
+                withAdditionalHeader(header).
+                withSessionListener(listener).
+                withAuthHandler(handler).build();
+
+        Assert.assertEquals(timeout, cfg.getConnectionTimeoutMillis());
+        Assert.assertEquals(Optional.fromNullable(header), cfg.getAdditionalHeader());
+        Assert.assertEquals(listener, cfg.getSessionListener());
+        Assert.assertEquals(handler, cfg.getAuthHandler());
+        Assert.assertEquals(strategy, cfg.getReconnectStrategy());
+        Assert.assertEquals(NetconfClientConfiguration.NetconfClientProtocol.SSH, cfg.getProtocol());
+        Assert.assertEquals(address, cfg.getAddress());
+    }
+}
diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientDispatcherImplTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientDispatcherImplTest.java
new file mode 100644 (file)
index 0000000..5a2ec56
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelPromise;
+import io.netty.channel.EventLoopGroup;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
+import io.netty.util.concurrent.GenericFutureListener;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+import org.opendaylight.protocol.framework.ReconnectStrategyFactory;
+
+import java.net.InetSocketAddress;
+import java.util.concurrent.Future;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+
+public class NetconfClientDispatcherImplTest {
+    @Test
+    public void testNetconfClientDispatcherImpl() throws Exception {
+        EventLoopGroup bossGroup = Mockito.mock(EventLoopGroup.class);
+        EventLoopGroup workerGroup = Mockito.mock(EventLoopGroup.class);
+        Timer timer = new HashedWheelTimer();
+
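+        // Mock the netty plumbing returned when the dispatcher registers a new channel.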
+        ChannelFuture chf = Mockito.mock(ChannelFuture.class);
+        Channel ch = Mockito.mock(Channel.class);
+        doReturn(ch).when(chf).channel();
+        Throwable thr = Mockito.mock(Throwable.class);
+        doReturn(chf).when(workerGroup).register(any(Channel.class));
+
+        ChannelPromise promise = Mockito.mock(ChannelPromise.class);
+        doReturn(promise).when(chf).addListener(any(GenericFutureListener.class));
+        doReturn(thr).when(chf).cause();
+
+        Long timeout = 200L;
+        NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+        NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+        InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+        ReconnectStrategyFactory reconnectStrategyFactory = Mockito.mock(ReconnectStrategyFactory.class);
+        AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+        ReconnectStrategy reconnect = Mockito.mock(ReconnectStrategy.class);
+
+        doReturn(5).when(reconnect).getConnectTimeout();
+        doReturn("").when(reconnect).toString();
+        doReturn("").when(handler).toString();
+        doReturn("").when(reconnectStrategyFactory).toString();
+        doReturn(reconnect).when(reconnectStrategyFactory).createReconnectStrategy();
+
+        NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create().
+                withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+                withAddress(address).
+                withConnectionTimeoutMillis(timeout).
+                withReconnectStrategy(reconnect).
+                withAdditionalHeader(header).
+                withSessionListener(listener).
+                withConnectStrategyFactory(reconnectStrategyFactory).
+                withAuthHandler(handler).build();
+
+        NetconfReconnectingClientConfiguration cfg2 = NetconfReconnectingClientConfigurationBuilder.create().
+                withProtocol(NetconfClientConfiguration.NetconfClientProtocol.TCP).
+                withAddress(address).
+                withConnectionTimeoutMillis(timeout).
+                withReconnectStrategy(reconnect).
+                withAdditionalHeader(header).
+                withSessionListener(listener).
+                withConnectStrategyFactory(reconnectStrategyFactory).
+                withAuthHandler(handler).build();
+
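+        // Exercise the dispatcher with both the SSH and the TCP configuration, as plain and as reconnecting clients.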
+        NetconfClientDispatcherImpl dispatcher = new NetconfClientDispatcherImpl(bossGroup, workerGroup, timer);
+        Future<NetconfClientSession> sshSession = dispatcher.createClient(cfg);
+        Future<NetconfClientSession> tcpSession = dispatcher.createClient(cfg2);
+
+        Future<Void> sshReconn = dispatcher.createReconnectingClient(cfg);
+        Future<Void> tcpReconn = dispatcher.createReconnectingClient(cfg2);
+
+        assertNotNull(sshSession);
+        assertNotNull(tcpSession);
+        assertNotNull(sshReconn);
+        assertNotNull(tcpReconn);
+    }
+}
diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiatorFactoryTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiatorFactoryTest.java
new file mode 100644 (file)
index 0000000..0557a0c
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import io.netty.channel.Channel;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
+import io.netty.util.concurrent.Promise;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.opendaylight.protocol.framework.SessionNegotiator;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+public class NetconfClientSessionNegotiatorFactoryTest {
+    @Test
+    public void testGetSessionNegotiator() throws Exception {
+        NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+        Timer timer = new HashedWheelTimer();
+        SessionListenerFactory listenerFactory = mock(SessionListenerFactory.class);
+        doReturn(sessionListener).when(listenerFactory).getSessionListener();
+
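+        // A mocked channel and promise are enough for the factory to produce a negotiator.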
+        Channel channel = mock(Channel.class);
+        Promise promise = mock(Promise.class);
+        NetconfClientSessionNegotiatorFactory negotiatorFactory = new NetconfClientSessionNegotiatorFactory(timer,
+                Optional.<NetconfHelloMessageAdditionalHeader>absent(), 200L);
+
+        SessionNegotiator sessionNegotiator = negotiatorFactory.getSessionNegotiator(listenerFactory, channel, promise);
+        assertNotNull(sessionNegotiator);
+    }
+}
diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiatorTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiatorTest.java
new file mode 100644 (file)
index 0000000..333e9de
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import io.netty.channel.*;
+import io.netty.handler.ssl.SslHandler;
+import io.netty.util.HashedWheelTimer;
+import io.netty.util.Timer;
+import io.netty.util.concurrent.GenericFutureListener;
+import io.netty.util.concurrent.Promise;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.mockito.internal.util.collections.Sets;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.netconf.api.NetconfClientSessionPreferences;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.nettyutil.handler.ChunkedFramingMechanismEncoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToHelloMessageDecoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfXMLToMessageDecoder;
+import org.opendaylight.controller.netconf.nettyutil.handler.exi.NetconfStartExiMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.openexi.proc.common.EXIOptions;
+import org.w3c.dom.Document;
+import java.util.Set;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+public class NetconfClientSessionNegotiatorTest {
+
+    private NetconfHelloMessage helloMessage;
+    private ChannelPipeline pipeline;
+    private ChannelFuture future;
+    private Channel channel;
+    private ChannelInboundHandlerAdapter channelInboundHandlerAdapter;
+
+    @Before
+    public void setUp() throws Exception {
+        helloMessage = NetconfHelloMessage.createClientHello(Sets.newSet("exi:1.0"), Optional.<NetconfHelloMessageAdditionalHeader>absent());
+        pipeline = mockChannelPipeline();
+        future = mockChannelFuture();
+        channel = mockChannel();
+    }
+
+    private ChannelHandler mockChannelHandler() {
+        ChannelHandler handler = mock(ChannelHandler.class);
+        return handler;
+    }
+
+    private Channel mockChannel() {
+        Channel channel = mock(Channel.class);
+        ChannelHandler channelHandler = mockChannelHandler();
+        doReturn("").when(channel).toString();
+        doReturn(future).when(channel).close();
+        doReturn(future).when(channel).writeAndFlush(anyObject());
+        doReturn(true).when(channel).isOpen();
+        doReturn(pipeline).when(channel).pipeline();
+        doReturn("").when(pipeline).toString();
+        doReturn(pipeline).when(pipeline).remove(any(ChannelHandler.class));
+        doReturn(channelHandler).when(pipeline).remove(anyString());
+        return channel;
+    }
+
+    private ChannelFuture mockChannelFuture() {
+        ChannelFuture future = mock(ChannelFuture.class);
+        doReturn(future).when(future).addListener(any(GenericFutureListener.class));
+        return future;
+    }
+
+    private ChannelPipeline mockChannelPipeline() {
+        ChannelPipeline pipeline = mock(ChannelPipeline.class);
+        ChannelHandler handler = mock(ChannelHandler.class);
+        doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+        doReturn(null).when(pipeline).get(SslHandler.class);
+        doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class));
+        doReturn(handler).when(pipeline).replace(anyString(), anyString(), any(ChunkedFramingMechanismEncoder.class));
+
+        NetconfXMLToHelloMessageDecoder messageDecoder = new NetconfXMLToHelloMessageDecoder();
+        doReturn(messageDecoder).when(pipeline).replace(anyString(), anyString(), any(NetconfXMLToMessageDecoder.class));
+        doReturn(pipeline).when(pipeline).replace(any(ChannelHandler.class), anyString(), any(NetconfClientSession.class));
+        return pipeline;
+    }
+
+    private NetconfClientSessionNegotiator createNetconfClientSessionNegotiator(Promise promise,
+                                                                                NetconfMessage startExi) {
+        ChannelProgressivePromise progressivePromise = mock(ChannelProgressivePromise.class);
+        NetconfClientSessionPreferences preferences = new NetconfClientSessionPreferences(helloMessage, startExi);
+        doReturn(progressivePromise).when(promise).setFailure(any(Throwable.class));
+
+        long timeout = 10L;
+        NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+        Timer timer = new HashedWheelTimer();
+        return new NetconfClientSessionNegotiator(preferences, promise, channel, timer, sessionListener, timeout);
+    }
+
+    @Test
+    public void testNetconfClientSessionNegotiator() throws Exception {
+        Promise promise = mock(Promise.class);
+        doReturn(promise).when(promise).setSuccess(anyObject());
+        NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, null);
+
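+        // Without a start-exi message, the server hello alone completes the negotiation.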
+        negotiator.channelActive(null);
+        Set<String> caps = Sets.newSet("a", "b");
+        NetconfHelloMessage helloServerMessage = NetconfHelloMessage.createServerHello(caps, 10);
+        negotiator.handleMessage(helloServerMessage);
+        verify(promise).setSuccess(anyObject());
+    }
+
+    @Test
+    public void testNetconfClientSessionNegotiatorWithEXI() throws Exception {
+        Promise promise = mock(Promise.class);
+        EXIOptions exiOptions = new EXIOptions();
+        NetconfStartExiMessage exiMessage = NetconfStartExiMessage.create(exiOptions, "msg-id");
+        doReturn(promise).when(promise).setSuccess(anyObject());
+        NetconfClientSessionNegotiator negotiator = createNetconfClientSessionNegotiator(promise, exiMessage);
+
+        negotiator.channelActive(null);
+        Set<String> caps = Sets.newSet("exi:1.0");
+        NetconfHelloMessage serverHelloMessage = NetconfHelloMessage.createServerHello(caps, 10);
+
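+        // Capture the handler that the negotiator adds to the pipeline so the rpc-reply confirming EXI can be fed to it.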
+        doAnswer(new Answer() {
+            @Override
+            public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
+                channelInboundHandlerAdapter = ((ChannelInboundHandlerAdapter) invocationOnMock.getArguments()[2]);
+                return null;
+            }
+        }).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+
+        ChannelHandlerContext handlerContext = mock(ChannelHandlerContext.class);
+        doReturn(pipeline).when(handlerContext).pipeline();
+        negotiator.handleMessage(serverHelloMessage);
+        Document expectedResult = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc-reply_ok.xml");
+        channelInboundHandlerAdapter.channelRead(handlerContext, new NetconfMessage(expectedResult));
+
+        verify(promise).setSuccess(anyObject());
+
+        // two replace calls for the exi message and two for the hello message
+        verify(pipeline, times(4)).replace(anyString(), anyString(), any(ChannelHandler.class));
+    }
+}
diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfClientSessionTest.java
new file mode 100644 (file)
index 0000000..4175190
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.collect.Lists;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.client.NetconfClientSession;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfEXICodec;
+import org.openexi.proc.common.EXIOptions;
+
+import java.util.ArrayList;
+import java.util.Collection;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+
+public class NetconfClientSessionTest {
+
+    @Mock
+    ChannelHandler channelHandler;
+
+    @Mock
+    Channel channel;
+
+    @Before
+    public void setUp() throws Exception {
+        MockitoAnnotations.initMocks(this);
+    }
+
+    @Test
+    public void testNetconfClientSession() throws Exception {
+        NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+        long sessId = 20L;
+        Collection<String> caps = Lists.newArrayList("cap1", "cap2");
+
+        NetconfEXICodec codec = new NetconfEXICodec(new EXIOptions());
+        ChannelPipeline pipeline = mock(ChannelPipeline.class);
+
+        Mockito.doReturn(pipeline).when(channel).pipeline();
+        Mockito.doReturn(channelHandler).when(pipeline).replace(anyString(), anyString(), any(ChannelHandler.class));
+        Mockito.doReturn("").when(channelHandler).toString();
+
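+        // addExiHandlers and stopExiCommunication together account for the four pipeline replace calls verified below.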
+        NetconfClientSession session = new NetconfClientSession(sessionListener, channel, sessId, caps);
+        session.addExiHandlers(codec);
+        session.stopExiCommunication();
+
+        assertEquals(caps, session.getServerCapabilities());
+        assertEquals(session, session.thisInstance());
+
+        Mockito.verify(pipeline, Mockito.times(4)).replace(anyString(), anyString(), Mockito.any(ChannelHandler.class));
+    }
+}
diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfReconnectingClientConfigurationTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/NetconfReconnectingClientConfigurationTest.java
new file mode 100644 (file)
index 0000000..e79a370
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import com.google.common.base.Optional;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
+import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfiguration;
+import org.opendaylight.controller.netconf.client.conf.NetconfReconnectingClientConfigurationBuilder;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.opendaylight.protocol.framework.ReconnectStrategy;
+import org.opendaylight.protocol.framework.ReconnectStrategyFactory;
+
+import java.net.InetSocketAddress;
+
+public class NetconfReconnectingClientConfigurationTest {
+    @Test
+    public void testNetconfReconnectingClientConfiguration() throws Exception {
+        Long timeout = 200L;
+        NetconfHelloMessageAdditionalHeader header = new NetconfHelloMessageAdditionalHeader("a", "host", "port", "trans", "id");
+        NetconfClientSessionListener listener = new SimpleNetconfClientSessionListener();
+        InetSocketAddress address = InetSocketAddress.createUnresolved("host", 830);
+        ReconnectStrategyFactory strategy = Mockito.mock(ReconnectStrategyFactory.class);
+        AuthenticationHandler handler = Mockito.mock(AuthenticationHandler.class);
+        ReconnectStrategy reconnect = Mockito.mock(ReconnectStrategy.class);
+
+        NetconfReconnectingClientConfiguration cfg = NetconfReconnectingClientConfigurationBuilder.create().
+                withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH).
+                withAddress(address).
+                withConnectionTimeoutMillis(timeout).
+                withReconnectStrategy(reconnect).
+                withAdditionalHeader(header).
+                withSessionListener(listener).
+                withConnectStrategyFactory(strategy).
+                withAuthHandler(handler).build();
+
+        Assert.assertEquals(timeout, cfg.getConnectionTimeoutMillis());
+        Assert.assertEquals(Optional.fromNullable(header), cfg.getAdditionalHeader());
+        Assert.assertEquals(listener, cfg.getSessionListener());
+        Assert.assertEquals(handler, cfg.getAuthHandler());
+        Assert.assertEquals(strategy, cfg.getConnectStrategyFactory());
+        Assert.assertEquals(NetconfClientConfiguration.NetconfClientProtocol.SSH, cfg.getProtocol());
+        Assert.assertEquals(address, cfg.getAddress());
+        Assert.assertEquals(reconnect, cfg.getReconnectStrategy());
+    }
+}
diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/SimpleNetconfClientSessionListenerTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/SimpleNetconfClientSessionListenerTest.java
new file mode 100644 (file)
index 0000000..e067cc2
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.*;
+import io.netty.util.concurrent.Future;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.internal.util.collections.Sets;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
+
+import java.util.Set;
+
+import static org.junit.Assert.*;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.*;
+
+public class SimpleNetconfClientSessionListenerTest {
+
+    private Channel channel;
+    private ChannelFuture channelFuture;
+    Set<String> caps;
+    private NetconfHelloMessage helloMessage;
+    private NetconfMessage message;
+    private NetconfClientSessionListener sessionListener;
+    private NetconfClientSession clientSession;
+
+    @Before
+    public void setUp() throws Exception {
+        channel = mock(Channel.class);
+        channelFuture = mock(ChannelFuture.class);
+        doReturn(channelFuture).when(channel).writeAndFlush(anyObject());
+        caps = Sets.newSet("a", "b");
+        helloMessage = NetconfHelloMessage.createServerHello(caps, 10);
+        message = new NetconfMessage(helloMessage.getDocument());
+        sessionListener = mock(NetconfClientSessionListener.class);
+        clientSession = new NetconfClientSession(sessionListener, channel, 20L, caps);
+    }
+
+    @Test
+    public void testSessionDown() throws Exception {
+        SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener();
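+        // The request sent before the session is up is flushed to the channel once onSessionUp is called.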
+        Future<NetconfMessage> promise = simpleListener.sendRequest(message);
+        simpleListener.onSessionUp(clientSession);
+        verify(channel, times(1)).writeAndFlush(anyObject());
+
+        simpleListener.onSessionDown(clientSession, new Exception());
+        assertFalse(promise.isSuccess());
+    }
+
+    @Test
+    public void testSendRequest() throws Exception {
+        SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener();
+        Future<NetconfMessage> promise = simpleListener.sendRequest(message);
+        simpleListener.onSessionUp(clientSession);
+        verify(channel, times(1)).writeAndFlush(anyObject());
+
+        simpleListener.sendRequest(message);
+        assertFalse(promise.isSuccess());
+    }
+
+    @Test
+    public void testOnMessage() throws Exception {
+        SimpleNetconfClientSessionListener simpleListener = new SimpleNetconfClientSessionListener();
+        Future<NetconfMessage> promise = simpleListener.sendRequest(message);
+        simpleListener.onSessionUp(clientSession);
+        verify(channel, times(1)).writeAndFlush(anyObject());
+
+        simpleListener.onMessage(clientSession, message);
+        assertTrue(promise.isSuccess());
+    }
+}
diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/SshClientChannelInitializerTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/SshClientChannelInitializerTest.java
new file mode 100644 (file)
index 0000000..0830c29
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import io.netty.util.concurrent.Promise;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.opendaylight.protocol.framework.SessionNegotiator;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.*;
+
+public class SshClientChannelInitializerTest {
+    @Test
+    public void test() throws Exception {
+
+        AuthenticationHandler authenticationHandler = mock(AuthenticationHandler.class);
+        NetconfClientSessionNegotiatorFactory negotiatorFactory = mock(NetconfClientSessionNegotiatorFactory.class);
+        NetconfClientSessionListener sessionListener = mock(NetconfClientSessionListener.class);
+
+        SessionNegotiator sessionNegotiator = mock(SessionNegotiator.class);
+        doReturn("").when(sessionNegotiator).toString();
+        doReturn(sessionNegotiator).when(negotiatorFactory).getSessionNegotiator(any(SessionListenerFactory.class), any(Channel.class), any(Promise.class));
+        ChannelPipeline pipeline = mock(ChannelPipeline.class);
+        doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+        Channel channel = mock(Channel.class);
+        doReturn(pipeline).when(channel).pipeline();
+        doReturn("").when(channel).toString();
+        doReturn(pipeline).when(pipeline).addFirst(any(ChannelHandler.class));
+        doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class));
+
+        Promise<NetconfClientSession> promise = mock(Promise.class);
+        doReturn("").when(promise).toString();
+
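+        // Initialization is expected to add exactly one handler at the head of the pipeline.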
+        SshClientChannelInitializer initializer = new SshClientChannelInitializer(authenticationHandler, negotiatorFactory,
+                sessionListener);
+        initializer.initialize(channel, promise);
+        verify(pipeline, times(1)).addFirst(any(ChannelHandler.class));
+    }
+}
diff --git a/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/TcpClientChannelInitializerTest.java b/opendaylight/netconf/netconf-client/src/test/java/org/opendaylight/controller/netconf/client/TcpClientChannelInitializerTest.java
new file mode 100644 (file)
index 0000000..e355cf4
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.client;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import io.netty.util.concurrent.Promise;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.nettyutil.AbstractChannelInitializer;
+import org.opendaylight.protocol.framework.SessionListenerFactory;
+import org.opendaylight.protocol.framework.SessionNegotiator;
+
+import static org.mockito.Mockito.*;
+
+public class TcpClientChannelInitializerTest {
+    @Test
+    public void testInitializeSessionNegotiator() throws Exception {
+        NetconfClientSessionNegotiatorFactory factory = mock(NetconfClientSessionNegotiatorFactory.class);
+        SessionNegotiator sessionNegotiator = mock(SessionNegotiator.class);
+        doReturn("").when(sessionNegotiator).toString();
+        doReturn(sessionNegotiator).when(factory).getSessionNegotiator(any(SessionListenerFactory.class), any(Channel.class), any(Promise.class));
+        NetconfClientSessionListener listener = mock(NetconfClientSessionListener.class);
+        TcpClientChannelInitializer initializer = new TcpClientChannelInitializer(factory, listener);
+        ChannelPipeline pipeline = mock(ChannelPipeline.class);
+        doReturn(pipeline).when(pipeline).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+        Channel channel = mock(Channel.class);
+        doReturn(pipeline).when(channel).pipeline();
+        doReturn("").when(channel).toString();
+
+        Promise<NetconfClientSession> promise = mock(Promise.class);
+        doReturn("").when(promise).toString();
+
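+        // The session negotiator should be inserted into the pipeline exactly once.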
+        initializer.initializeSessionNegotiator(channel, promise);
+        verify(pipeline, times(1)).addAfter(anyString(), anyString(), any(ChannelHandler.class));
+    }
+}
@@ -6,7 +6,7 @@
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
 
-package org.opendaylight.controller.netconf.client.test;
+package org.opendaylight.controller.netconf.client;
 
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
@@ -26,11 +26,6 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import org.opendaylight.controller.netconf.api.NetconfMessage;
-import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
-import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
-import org.opendaylight.controller.netconf.client.NetconfClientSession;
-import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
-import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
 import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
 import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration.NetconfClientProtocol;
 import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
index 81fac5f12f9c7dc9d3bf40922c19723836c036ce..2a45e1757b8655a6b6be8bcf097867b677fd70d1 100644 (file)
@@ -13,6 +13,10 @@ import com.google.common.collect.Collections2;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 import io.netty.util.internal.ConcurrentSet;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import javax.annotation.Nonnull;
 import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
 import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
 import org.opendaylight.controller.netconf.mapping.api.Capability;
@@ -32,11 +36,6 @@ import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.mon
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.annotation.Nullable;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-
 public class NetconfMonitoringServiceImpl implements NetconfMonitoringService, SessionMonitoringService {
 
     private static final Logger logger = LoggerFactory.getLogger(NetconfMonitoringServiceImpl.class);
@@ -134,9 +133,8 @@ public class NetconfMonitoringServiceImpl implements NetconfMonitoringService, S
 
     private List<Session> transformSessions(Set<NetconfManagementSession> sessions) {
         return Lists.newArrayList(Collections2.transform(sessions, new Function<NetconfManagementSession, Session>() {
-            @Nullable
             @Override
-            public Session apply(@Nullable NetconfManagementSession input) {
+            public Session apply(@Nonnull NetconfManagementSession input) {
                 return input.toManagementSession();
             }
         }));
index ff96ad779fbc974539ced455494129381bb09635..aa590604b0087d5f7cc79f135a246b6a929abe07 100644 (file)
@@ -48,7 +48,7 @@ public class NetconfOperationRouterImpl implements NetconfOperationRouter {
         this.netconfOperationServiceSnapshot = netconfOperationServiceSnapshot;
     }
 
-    private void initNetconfOperations(Set<NetconfOperation> allOperations) {
+    private synchronized void initNetconfOperations(Set<NetconfOperation> allOperations) {
         allNetconfOperations = allOperations;
     }
 
index c5281d01f841263d5f1258975ee85764565a111d..5f8bc06e1015625cf48b9134058f928c54f337c1 100644 (file)
@@ -59,7 +59,7 @@ import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
 import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
 import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
 import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
 import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceFactoryListenerImpl;
 import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
 import org.opendaylight.controller.netconf.mapping.api.Capability;
index 272b686fc034ff33fbbe247b706ec536040660cb..3a70a399bb8f82f563eb5e9f7eaa252c9f28eeb0 100644 (file)
       <artifactId>config-util</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>${project.groupId}</groupId>
+      <artifactId>sal-netconf-connector</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>${project.groupId}</groupId>
       <artifactId>netconf-api</artifactId>
index 4b49c0928b09864342519cd9b2b5770b032cba30..d8eb841a799faf1993f025126bb241d8ceba4c66 100644 (file)
@@ -36,7 +36,7 @@ import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
 import org.opendaylight.controller.config.persist.api.Persister;
 import org.opendaylight.controller.netconf.api.NetconfMessage;
 import org.opendaylight.controller.netconf.api.jmx.CommitJMXNotification;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
 import org.opendaylight.controller.netconf.impl.DefaultCommitNotificationProducer;
 import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
 import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl;
index 72a2f8f7ac951b5b32d31aa9e56eb26b599f96d0..a9558c06cdcd34472315c9ac80019e55dbb98b71 100644 (file)
@@ -28,7 +28,7 @@ import java.util.Set;
 import org.junit.Test;
 import org.opendaylight.controller.netconf.api.NetconfMessage;
 import org.opendaylight.controller.netconf.api.monitoring.NetconfManagementSession;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
 import org.opendaylight.controller.netconf.impl.osgi.NetconfMonitoringServiceImpl;
 import org.opendaylight.controller.netconf.impl.osgi.NetconfOperationServiceSnapshotImpl;
 import org.opendaylight.controller.netconf.impl.osgi.SessionMonitoringService;
index 67ccf0c02ca4a2531986000bfc66db5bddce29f4..bc8efbe91535573cc18bcd00d97e43dcf91abc2f 100644 (file)
@@ -10,31 +10,39 @@ package org.opendaylight.controller.netconf.it;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 
 import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
 import io.netty.channel.local.LocalAddress;
-import io.netty.util.concurrent.Future;
-import io.netty.util.concurrent.GenericFutureListener;
 import io.netty.util.concurrent.GlobalEventExecutor;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
 import org.opendaylight.controller.netconf.api.NetconfMessage;
 import org.opendaylight.controller.netconf.auth.AuthProvider;
 import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
 import org.opendaylight.controller.netconf.client.NetconfClientDispatcherImpl;
+import org.opendaylight.controller.netconf.client.NetconfClientSessionListener;
 import org.opendaylight.controller.netconf.client.SimpleNetconfClientSessionListener;
 import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
 import org.opendaylight.controller.netconf.client.conf.NetconfClientConfigurationBuilder;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
 import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
 import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.LoginPassword;
 import org.opendaylight.controller.netconf.ssh.NetconfSSHServer;
@@ -42,7 +50,15 @@ import org.opendaylight.controller.netconf.ssh.authentication.PEMGenerator;
 import org.opendaylight.controller.netconf.util.messages.NetconfMessageUtil;
 import org.opendaylight.controller.netconf.util.osgi.NetconfConfigUtil;
 import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.api.RemoteDevice;
+import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
+import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
 import org.opendaylight.protocol.framework.NeverReconnectStrategy;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.xml.sax.SAXException;
 
 public class NetconfITSecureTest extends AbstractNetconfConfigTest {
 
@@ -70,7 +86,7 @@ public class NetconfITSecureTest extends AbstractNetconfConfigTest {
     @Test
     public void testSecure() throws Exception {
         final NetconfClientDispatcher dispatch = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
-        try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration())) {
+        try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration(new SimpleNetconfClientSessionListener()))) {
             NetconfMessage response = netconfClient.sendMessage(getGetConfig());
             assertFalse("Unexpected error message " + XmlUtil.toString(response.getDocument()),
                     NetconfMessageUtil.isErrorMessage(response));
@@ -91,29 +107,42 @@ public class NetconfITSecureTest extends AbstractNetconfConfigTest {
     /**
      * Test all requests are handled properly and no mismatch occurs in listener
      */
-    @Test(timeout = 3*60*1000)
+    @Test(timeout = 5*60*1000)
     public void testSecureStress() throws Exception {
+        final int requests = 10000;
+
         final NetconfClientDispatcher dispatch = new NetconfClientDispatcherImpl(getNettyThreadgroup(), getNettyThreadgroup(), getHashedWheelTimer());
-        try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration())) {
+        final NetconfDeviceCommunicator sessionListener = getSessionListener();
+        try (TestingNetconfClient netconfClient = new TestingNetconfClient("testing-ssh-client", dispatch, getClientConfiguration(sessionListener))) {
 
             final AtomicInteger responseCounter = new AtomicInteger(0);
-            final List<Future<?>> futures = Lists.newArrayList();
+            final List<ListenableFuture<RpcResult<NetconfMessage>>> futures = Lists.newArrayList();
 
-            final int requests = 1000;
             for (int i = 0; i < requests; i++) {
-                final Future<NetconfMessage> netconfMessageFuture = netconfClient.sendRequest(getGetConfig());
+                NetconfMessage getConfig = getGetConfig();
+                getConfig = changeMessageId(getConfig, i);
+                final ListenableFuture<RpcResult<NetconfMessage>> netconfMessageFuture = sessionListener.sendRequest(getConfig, QName.create("namespace", "2012-12-12", "get"));
                 futures.add(netconfMessageFuture);
-                netconfMessageFuture.addListener(new GenericFutureListener<Future<? super NetconfMessage>>() {
+                Futures.addCallback(netconfMessageFuture, new FutureCallback<RpcResult<NetconfMessage>>() {
                     @Override
-                    public void operationComplete(final Future<? super NetconfMessage> future) throws Exception {
-                        assertTrue("Request unsuccessful " + future.cause(), future.isSuccess());
+                    public void onSuccess(final RpcResult<NetconfMessage> result) {
                         responseCounter.incrementAndGet();
                     }
+
+                    @Override
+                    public void onFailure(final Throwable t) {
+                        throw new RuntimeException(t);
+                    }
                 });
             }
 
-            for (final Future<?> future : futures) {
-                future.await();
+            // Wait for every future
+            for (final ListenableFuture<RpcResult<NetconfMessage>> future : futures) {
+                try {
+                    future.get(3, TimeUnit.MINUTES);
+                } catch (final TimeoutException e) {
+                    fail("Request " + futures.indexOf(future) + " is not responding");
+                }
             }
 
             // Give future listeners some time to finish counter incrementation
@@ -123,10 +152,17 @@ public class NetconfITSecureTest extends AbstractNetconfConfigTest {
         }
     }
 
-    public NetconfClientConfiguration getClientConfiguration() throws IOException {
+    private NetconfMessage changeMessageId(final NetconfMessage getConfig, final int i) throws IOException, SAXException {
+        String s = XmlUtil.toString(getConfig.getDocument(), false);
+        s = s.replace("101", Integer.toString(i));
+        return new NetconfMessage(XmlUtil.readXmlToDocument(s));
+    }
+
+    public NetconfClientConfiguration getClientConfiguration(final NetconfClientSessionListener sessionListener) throws IOException {
         final NetconfClientConfigurationBuilder b = NetconfClientConfigurationBuilder.create();
         b.withAddress(TLS_ADDRESS);
-        b.withSessionListener(new SimpleNetconfClientSessionListener());
+        // Use the session listener from sal-netconf-connector, since the stress test cannot be performed with the simple listener
+        b.withSessionListener(sessionListener);
         b.withReconnectStrategy(new NeverReconnectStrategy(GlobalEventExecutor.INSTANCE, 5000));
         b.withProtocol(NetconfClientConfiguration.NetconfClientProtocol.SSH);
         b.withConnectionTimeoutMillis(5000);
@@ -134,6 +170,16 @@ public class NetconfITSecureTest extends AbstractNetconfConfigTest {
         return b.build();
     }
 
+    @Mock
+    private RemoteDevice<NetconfSessionCapabilities, NetconfMessage> mockedRemoteDevice;
+
+    private NetconfDeviceCommunicator getSessionListener() {
+        MockitoAnnotations.initMocks(this);
+        doNothing().when(mockedRemoteDevice).onRemoteSessionUp(any(NetconfSessionCapabilities.class), any(RemoteDeviceCommunicator.class));
+        doNothing().when(mockedRemoteDevice).onRemoteSessionDown();
+        return new NetconfDeviceCommunicator(new RemoteDeviceId("secure-test"), mockedRemoteDevice);
+    }
+
     public AuthProvider getAuthProvider() throws Exception {
         final AuthProvider mockAuth = mock(AuthProvider.class);
         doReturn("mockedAuth").when(mockAuth).toString();
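
The stress-test changes above replace Netty's GenericFutureListener with Guava's ListenableFuture, so every request is counted through a callback and awaited with a bounded timeout instead of an unbounded await(). A minimal, self-contained sketch of that pattern using a plain executor rather than the NETCONF communicator (all names below are illustrative, and it assumes a Guava version contemporary with this patch, where the two-argument Futures.addCallback overload is available):

    import com.google.common.collect.Lists;
    import com.google.common.util.concurrent.FutureCallback;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import com.google.common.util.concurrent.ListeningExecutorService;
    import com.google.common.util.concurrent.MoreExecutors;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class BoundedRequestWaitSketch {
        public static void main(final String[] args) throws Exception {
            final ListeningExecutorService executor =
                    MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
            final AtomicInteger responseCounter = new AtomicInteger(0);
            final List<ListenableFuture<String>> futures = Lists.newArrayList();

            for (int i = 0; i < 100; i++) {
                final int id = i;
                final ListenableFuture<String> future = executor.submit(new Callable<String>() {
                    @Override
                    public String call() {
                        return "reply-" + id;
                    }
                });
                futures.add(future);
                // Count successful replies via a callback; a failure surfaces immediately.
                Futures.addCallback(future, new FutureCallback<String>() {
                    @Override
                    public void onSuccess(final String result) {
                        responseCounter.incrementAndGet();
                    }

                    @Override
                    public void onFailure(final Throwable t) {
                        throw new RuntimeException(t);
                    }
                });
            }

            // Wait for every future with a bounded timeout, so a single hung request
            // fails the run instead of blocking it forever.
            for (final ListenableFuture<String> future : futures) {
                future.get(3, TimeUnit.MINUTES);
            }

            executor.shutdown();
            System.out.println("Responses: " + responseCounter.get());
        }
    }
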
index a7a9d7494af73e8553bf9ce9a638b02958c5ece0..4c0730863fd0d0058281f468ec245c45c25e8fd6 100644 (file)
@@ -41,7 +41,7 @@ import org.opendaylight.controller.config.yang.test.impl.NetconfTestImplModuleMX
 import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
 import org.opendaylight.controller.netconf.api.NetconfMessage;
 import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
-import org.opendaylight.controller.netconf.client.test.TestingNetconfClient;
+import org.opendaylight.controller.netconf.client.TestingNetconfClient;
 import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
 import org.opendaylight.controller.netconf.util.xml.XmlElement;
 import org.opendaylight.controller.netconf.util.xml.XmlUtil;
index c5037d34ed4ac01fcea4b5dd264b8f715ad2cc9c..91fb805e6ab3f00a14096faea945eb0f4bad97b0 100644 (file)
@@ -7,6 +7,7 @@
     </appender>
 
   <logger name="org.opendaylight.controller.netconf" level="TRACE"/>
+  <logger name="org.opendaylight.controller.sal.connect.netconf" level="TRACE"/>
 
   <root level="error">
     <appender-ref ref="STDOUT" />
index 078509db40f1c71d511bf5881e75b79e75e0f82c..16b38eca5172a96a6254713dbefb6dbd44324949 100644 (file)
@@ -11,6 +11,7 @@ package org.opendaylight.controller.netconf.monitoring.xml.model;
 import com.google.common.base.Function;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Collections2;
+import javax.annotation.Nonnull;
 import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.Yang;
 import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.netconf.monitoring.rev101004.netconf.state.schemas.Schema;
 
@@ -41,7 +42,7 @@ final class MonitoringSchema {
         return Collections2.transform(schema.getLocation(), new Function<Schema.Location, String>() {
             @Nullable
             @Override
-            public String apply(@Nullable Schema.Location input) {
+            public String apply(@Nonnull Schema.Location input) {
                 return input.getEnumeration().toString();
             }
         });
index d0d587fb84263c02ef99cb0727de1d4c81b28223..08441b4ce5308f0bb85bfe606177f40bbfcc4f29 100644 (file)
@@ -38,7 +38,7 @@ import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.yang.types.
 public class JaxBSerializerTest {
 
     @Test
-    public void testName() throws Exception {
+    public void testSerialization() throws Exception {
 
         final NetconfMonitoringService service = new NetconfMonitoringService() {
 
@@ -53,29 +53,29 @@ public class JaxBSerializerTest {
             }
         };
         final NetconfState model = new NetconfState(service);
-        final String xml = XmlUtil.toString(new JaxBSerializer().toXml(model));
+        final String xml = XmlUtil.toString(new JaxBSerializer().toXml(model)).replaceAll("\\s", "");
 
         assertThat(xml, CoreMatchers.containsString(
-                "<schema>\n" +
-                "<format>yang</format>\n" +
-                "<identifier>id</identifier>\n" +
-                "<location>NETCONF</location>\n" +
-                "<namespace>localhost</namespace>\n" +
-                "<version>v1</version>\n" +
-                "</schema>\n"));
+                "<schema>" +
+                "<format>yang</format>" +
+                "<identifier>id</identifier>" +
+                "<location>NETCONF</location>" +
+                "<namespace>localhost</namespace>" +
+                "<version>v1</version>" +
+                "</schema>"));
 
         assertThat(xml, CoreMatchers.containsString(
-                "<session>\n" +
-                "<session-id>1</session-id>\n" +
-                "<in-bad-rpcs>0</in-bad-rpcs>\n" +
-                "<in-rpcs>0</in-rpcs>\n" +
-                "<login-time>loginTime</login-time>\n" +
-                "<out-notifications>0</out-notifications>\n" +
-                "<out-rpc-errors>0</out-rpc-errors>\n" +
-                "<ncme:session-identifier>client</ncme:session-identifier>\n" +
-                "<source-host>address/port</source-host>\n" +
-                "<transport>ncme:netconf-tcp</transport>\n" +
-                "<username>username</username>\n" +
+                "<session>" +
+                "<session-id>1</session-id>" +
+                "<in-bad-rpcs>0</in-bad-rpcs>" +
+                "<in-rpcs>0</in-rpcs>" +
+                "<login-time>loginTime</login-time>" +
+                "<out-notifications>0</out-notifications>" +
+                "<out-rpc-errors>0</out-rpc-errors>" +
+                "<ncme:session-identifier>client</ncme:session-identifier>" +
+                "<source-host>address/port</source-host>" +
+                "<transport>ncme:netconf-tcp</transport>" +
+                "<username>username</username>" +
                 "</session>"));
     }
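
The serializer test above strips all whitespace from both the produced XML and the expected fragments, so the assertions no longer depend on pretty-printing. Where whole documents can be compared, the XMLUnit facilities used elsewhere in this patch give the same tolerance; a minimal sketch, assuming XMLUnit 1.x and JUnit 4 on the classpath (the XML content is illustrative only):

    import static org.junit.Assert.assertTrue;

    import org.custommonkey.xmlunit.Diff;
    import org.custommonkey.xmlunit.XMLUnit;
    import org.junit.Test;

    public class WhitespaceInsensitiveXmlAssertSketch {

        @Test
        public void testIgnoresFormattingDifferences() throws Exception {
            final String expected = "<schema><format>yang</format><identifier>id</identifier></schema>";
            final String actual = "<schema>\n  <format>yang</format>\n  <identifier>id</identifier>\n</schema>\n";

            // Ignore insignificant whitespace so pretty-printing differences do not fail the comparison.
            XMLUnit.setIgnoreWhitespace(true);
            final Diff diff = XMLUnit.compareXML(expected, actual);
            assertTrue(diff.toString(), diff.similar());
        }
    }
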
 
index 993709258a3410b80a8a6fbdde47f0374ca15619..531ba3ccb725589d76725ebfc9ecdae7ecdd35bc 100644 (file)
@@ -7,18 +7,21 @@
  */
 package org.opendaylight.controller.netconf.nettyutil.handler.exi;
 
-import com.google.common.base.Preconditions;
 import org.opendaylight.controller.netconf.util.xml.XmlElement;
 import org.openexi.proc.common.AlignmentType;
 import org.openexi.proc.common.EXIOptions;
 import org.openexi.proc.common.EXIOptionsException;
+import org.w3c.dom.Element;
+import org.w3c.dom.NodeList;
+
+import com.google.common.base.Preconditions;
 
 public final class EXIParameters {
     private static final String EXI_PARAMETER_ALIGNMENT = "alignment";
-    private static final String EXI_PARAMETER_BYTE_ALIGNED = "byte-aligned";
-    private static final String EXI_PARAMETER_BIT_PACKED = "bit-packed";
-    private static final String EXI_PARAMETER_COMPRESSED = "compressed";
-    private static final String EXI_PARAMETER_PRE_COMPRESSION = "pre-compression";
+    static final String EXI_PARAMETER_BYTE_ALIGNED = "byte-aligned";
+    static final String EXI_PARAMETER_BIT_PACKED = "bit-packed";
+    static final String EXI_PARAMETER_COMPRESSED = "compressed";
+    static final String EXI_PARAMETER_PRE_COMPRESSION = "pre-compression";
 
     private static final String EXI_PARAMETER_FIDELITY = "fidelity";
     private static final String EXI_FIDELITY_DTD = "dtd";
@@ -38,15 +41,25 @@ public final class EXIParameters {
         final EXIOptions options =  new EXIOptions();
 
         options.setAlignmentType(AlignmentType.bitPacked);
-        if (root.getElementsByTagName(EXI_PARAMETER_ALIGNMENT).getLength() > 0) {
-            if (root.getElementsByTagName(EXI_PARAMETER_BIT_PACKED).getLength() > 0) {
-                options.setAlignmentType(AlignmentType.bitPacked);
-            } else if (root.getElementsByTagName(EXI_PARAMETER_BYTE_ALIGNED).getLength() > 0) {
-                options.setAlignmentType(AlignmentType.byteAligned);
-            } else if (root.getElementsByTagName(EXI_PARAMETER_COMPRESSED).getLength() > 0) {
-                options.setAlignmentType(AlignmentType.compress);
-            } else if (root.getElementsByTagName(EXI_PARAMETER_PRE_COMPRESSION).getLength() > 0) {
-                options.setAlignmentType(AlignmentType.preCompress);
+
+        final NodeList alignmentElements = root.getElementsByTagName(EXI_PARAMETER_ALIGNMENT);
+        if (alignmentElements.getLength() > 0) {
+            final Element alignmentElement = (Element) alignmentElements.item(0);
+            final String alignmentTextContent = alignmentElement.getTextContent().trim();
+
+            switch (alignmentTextContent) {
+                case EXI_PARAMETER_BIT_PACKED:
+                    options.setAlignmentType(AlignmentType.bitPacked);
+                    break;
+                case EXI_PARAMETER_BYTE_ALIGNED:
+                    options.setAlignmentType(AlignmentType.byteAligned);
+                    break;
+                case EXI_PARAMETER_COMPRESSED:
+                    options.setAlignmentType(AlignmentType.compress);
+                    break;
+                case EXI_PARAMETER_PRE_COMPRESSION:
+                    options.setAlignmentType(AlignmentType.preCompress);
+                    break;
             }
         }
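
The rewritten parser above reads the alignment from the text content of the <alignment> element, which is what the start-exi payload actually carries, instead of probing for per-alignment tag names under the root. A standalone sketch of the same parsing idea using plain DOM (the class and method names below are illustrative and not part of EXIParameters):

    import java.io.StringReader;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.openexi.proc.common.AlignmentType;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;
    import org.xml.sax.InputSource;

    public final class AlignmentParsingSketch {

        static AlignmentType parseAlignment(final Element root) {
            final NodeList alignmentElements = root.getElementsByTagName("alignment");
            if (alignmentElements.getLength() == 0) {
                // Default when no alignment element is present.
                return AlignmentType.bitPacked;
            }
            final String text = alignmentElements.item(0).getTextContent().trim();
            switch (text) {
                case "byte-aligned":
                    return AlignmentType.byteAligned;
                case "compressed":
                    return AlignmentType.compress;
                case "pre-compression":
                    return AlignmentType.preCompress;
                default:
                    return AlignmentType.bitPacked;
            }
        }

        public static void main(final String[] args) throws Exception {
            final String xml = "<start-exi xmlns=\"urn:ietf:params:xml:ns:netconf:exi:1.0\">"
                    + "<alignment>byte-aligned</alignment></start-exi>";
            final Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
                    .parse(new InputSource(new StringReader(xml)));
            // Prints the parsed alignment type
            System.out.println(parseAlignment(doc.getDocumentElement()));
        }
    }
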
 
index 72eb774b5303efb14769d7fe1da644ea34456d82..1d301d3d35cc05f63d83c5643d400f3f643e02d7 100644 (file)
@@ -8,8 +8,8 @@
 
 package org.opendaylight.controller.netconf.nettyutil.handler.exi;
 
+import com.google.common.collect.Lists;
 import java.util.List;
-
 import org.opendaylight.controller.netconf.api.NetconfMessage;
 import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
 import org.opendaylight.controller.netconf.util.xml.XmlUtil;
@@ -17,8 +17,6 @@ import org.openexi.proc.common.EXIOptions;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 
-import com.google.common.collect.Lists;
-
 /**
  * Start-exi netconf message.
  */
@@ -33,19 +31,19 @@ public final class NetconfStartExiMessage extends NetconfMessage {
     public static final String PIS_KEY = "pis";
     public static final String PREFIXES_KEY = "prefixes";
 
-    private NetconfStartExiMessage(Document doc) {
+    private NetconfStartExiMessage(final Document doc) {
         super(doc);
     }
 
-    public static NetconfStartExiMessage create(EXIOptions exiOptions, String messageId) {
-        Document doc = XmlUtil.newDocument();
-        Element rpcElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+    public static NetconfStartExiMessage create(final EXIOptions exiOptions, final String messageId) {
+        final Document doc = XmlUtil.newDocument();
+        final Element rpcElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
                 XmlNetconfConstants.RPC_KEY);
         rpcElement.setAttributeNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
                 XmlNetconfConstants.MESSAGE_ID, messageId);
 
         // TODO draft http://tools.ietf.org/html/draft-varga-netconf-exi-capability-02#section-3.5.1 has no namespace for start-exi element in xml
-        Element startExiElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0,
+        final Element startExiElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0,
                 START_EXI);
 
         addAlignment(exiOptions, doc, startExiElement);
@@ -57,8 +55,8 @@ public final class NetconfStartExiMessage extends NetconfMessage {
         return new NetconfStartExiMessage(doc);
     }
 
-    private static void addFidelity(EXIOptions exiOptions, Document doc, Element startExiElement) {
-        List<Element> fidelityElements = Lists.newArrayList();
+    private static void addFidelity(final EXIOptions exiOptions, final Document doc, final Element startExiElement) {
+        final List<Element> fidelityElements = Lists.newArrayList();
         createFidelityElement(doc, fidelityElements, exiOptions.getPreserveComments(), COMMENTS_KEY);
         createFidelityElement(doc, fidelityElements, exiOptions.getPreserveDTD(), DTD_KEY);
         createFidelityElement(doc, fidelityElements, exiOptions.getPreserveLexicalValues(), LEXICAL_VALUES_KEY);
@@ -66,23 +64,44 @@ public final class NetconfStartExiMessage extends NetconfMessage {
         createFidelityElement(doc, fidelityElements, exiOptions.getPreserveNS(), PREFIXES_KEY);
 
         if (fidelityElements.isEmpty() == false) {
-            Element fidelityElement = doc.createElementNS(
+            final Element fidelityElement = doc.createElementNS(
                     XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0, FIDELITY_KEY);
-            for (Element element : fidelityElements) {
+            for (final Element element : fidelityElements) {
                 fidelityElement.appendChild(element);
             }
             startExiElement.appendChild(fidelityElement);
         }
     }
 
-    private static void addAlignment(EXIOptions exiOptions, Document doc, Element startExiElement) {
-        Element alignmentElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0,
+    private static void addAlignment(final EXIOptions exiOptions, final Document doc, final Element startExiElement) {
+        final Element alignmentElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0,
                 ALIGNMENT_KEY);
-        alignmentElement.setTextContent(exiOptions.getAlignmentType().toString());
+
+        String alignmentString = EXIParameters.EXI_PARAMETER_BIT_PACKED;
+        switch (exiOptions.getAlignmentType()) {
+            case byteAligned: {
+                alignmentString = EXIParameters.EXI_PARAMETER_BYTE_ALIGNED;
+                break;
+            }
+            case bitPacked: {
+                alignmentString = EXIParameters.EXI_PARAMETER_BIT_PACKED;
+                break;
+            }
+            case compress: {
+                alignmentString = EXIParameters.EXI_PARAMETER_COMPRESSED;
+                break;
+            }
+            case preCompress: {
+                alignmentString = EXIParameters.EXI_PARAMETER_PRE_COMPRESSION;
+                break;
+            }
+        }
+
+        alignmentElement.setTextContent(alignmentString);
         startExiElement.appendChild(alignmentElement);
     }
 
-    private static void createFidelityElement(Document doc, List<Element> fidelityElements, boolean fidelity, String fidelityName) {
+    private static void createFidelityElement(final Document doc, final List<Element> fidelityElements, final boolean fidelity, final String fidelityName) {
 
         if (fidelity) {
             fidelityElements.add(doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_EXI_1_0,
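
The addAlignment() change above maps the OpenEXI AlignmentType back to the literal option names defined for start-exi, rather than relying on the enum's toString(), keeping the encoder symmetric with the parser in EXIParameters. A small isolated sketch of that mapping (hypothetical class name; the string literals mirror the EXI_PARAMETER_* constants above):

    import org.openexi.proc.common.AlignmentType;

    public final class AlignmentNameSketch {

        // Maps an OpenEXI alignment type to the option name carried inside <start-exi>.
        static String toOptionName(final AlignmentType alignmentType) {
            switch (alignmentType) {
                case byteAligned:
                    return "byte-aligned";
                case compress:
                    return "compressed";
                case preCompress:
                    return "pre-compression";
                case bitPacked:
                default:
                    return "bit-packed";
            }
        }

        public static void main(final String[] args) {
            System.out.println(toOptionName(AlignmentType.byteAligned)); // byte-aligned
        }
    }
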
index 0d877c9ec73797010013df229b9101d86445304f..3d1e4784f2ded5677472e3a25d99adaa11e24cf9 100644 (file)
@@ -148,9 +148,11 @@ public class AsyncSshHandler extends ChannelOutboundHandlerAdapter {
         connectPromise = null;
 
         sshReadAsyncListener = new SshReadAsyncListener(this, ctx, channel.getAsyncOut());
-        sshWriteAsyncHandler = new SshWriteAsyncHandler(this, channel.getAsyncIn());
-
-        ctx.fireChannelActive();
+        // If readAsyncListener receives an immediate close, it closes this handler, and closing this handler sets the channel field to null
+        if(channel != null) {
+            sshWriteAsyncHandler = new SshWriteAsyncHandler(this, channel.getAsyncIn());
+            ctx.fireChannelActive();
+        }
     }
 
     private synchronized void handleSshSetupFailure(final ChannelHandlerContext ctx, final Throwable e) {
@@ -230,17 +232,14 @@ public class AsyncSshHandler extends ChannelOutboundHandlerAdapter {
         @Override
         public synchronized void operationComplete(final IoReadFuture future) {
             if(future.getException() != null) {
-
                 if(asyncOut.isClosed() || asyncOut.isClosing()) {
-
                     // Ssh dropped
                     logger.debug("Ssh session dropped on channel: {}", ctx.channel(), future.getException());
-                    invokeDisconnect();
-                    return;
                 } else {
                     logger.warn("Exception while reading from SSH remote on channel {}", ctx.channel(), future.getException());
-                    invokeDisconnect();
                 }
+                invokeDisconnect();
+                return;
             }
 
             if (future.getRead() > 0) {
@@ -324,22 +323,29 @@ public class AsyncSshHandler extends ChannelOutboundHandlerAdapter {
                 // Check limit for pending writes
                 pendingWriteCounter++;
                 if(pendingWriteCounter > MAX_PENDING_WRITES) {
+                    promise.setFailure(e);
                     handlePendingFailed(ctx, new IllegalStateException("Too much pending writes(" + MAX_PENDING_WRITES + ") on channel: " + ctx.channel() +
                             ", remote window is not getting read or is too small"));
                 }
 
+                // We need to reset the buffer reader index, since the buffer was already read during the first write attempt
+                ((ByteBuf) msg).resetReaderIndex();
                 logger.debug("Write pending to SSH remote on channel: {}, current pending count: {}", ctx.channel(), pendingWriteCounter);
 
                 // In case of pending, re-invoke write after pending is finished
+                Preconditions.checkNotNull(lastWriteFuture, "Write is pending, but there was no previous write attempt", e);
                 lastWriteFuture.addListener(new SshFutureListener<IoWriteFuture>() {
                     @Override
                     public void operationComplete(final IoWriteFuture future) {
+                        // FIXME possible minor race condition: this callback (run when the pending write finishes) is not guaranteed to execute first;
+                        // an external thread could trigger another write on this instance while we are on this line.
+                        // Verify
                         if (future.isWritten()) {
                             synchronized (SshWriteAsyncHandler.this) {
                                 // Pending done, decrease counter
                                 pendingWriteCounter--;
+                                write(ctx, msg, promise);
                             }
-                            write(ctx, msg, promise);
                         } else {
                             // Cannot reschedule pending, fail
                             handlePendingFailed(ctx, e);
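
The pending-write fix above fails the promise once the pending limit is exceeded, rewinds the ByteBuf before the retry, and re-invokes write() only after the previous write has completed, now inside the same synchronized block. The rewind matters because the first, pending attempt has already advanced the buffer's reader index; a minimal standalone illustration of that detail with plain Netty buffers, no SSH involved:

    import io.netty.buffer.ByteBuf;
    import io.netty.buffer.Unpooled;

    public final class ResetReaderIndexSketch {

        public static void main(final String[] args) {
            final ByteBuf msg = Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5});

            // The first (pending) write attempt consumes the readable bytes.
            final byte[] firstAttempt = new byte[msg.readableBytes()];
            msg.readBytes(firstAttempt);
            System.out.println("readable after first attempt: " + msg.readableBytes()); // 0

            // Without resetReaderIndex() the retry would see an empty buffer and write nothing.
            msg.resetReaderIndex();
            System.out.println("readable after reset: " + msg.readableBytes()); // 6

            msg.release();
        }
    }
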
diff --git a/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/AbstractChannelInitializerTest.java b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/AbstractChannelInitializerTest.java
new file mode 100644 (file)
index 0000000..83eafb5
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import io.netty.util.concurrent.Promise;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.api.NetconfSession;
+
+public class AbstractChannelInitializerTest {
+
+    @Mock
+    private Channel channel;
+    @Mock
+    private ChannelPipeline pipeline;
+    @Mock
+    private Promise<NetconfSession> sessionPromise;
+
+    @Before
+    public void setUp() throws Exception {
+        MockitoAnnotations.initMocks(this);
+        doReturn(pipeline).when(channel).pipeline();
+        doReturn(pipeline).when(pipeline).addLast(anyString(), any(ChannelHandler.class));
+    }
+
+    @Test
+    public void testInit() throws Exception {
+        final TestingInitializer testingInitializer = new TestingInitializer();
+        testingInitializer.initialize(channel, sessionPromise);
+        verify(pipeline, times(4)).addLast(anyString(), any(ChannelHandler.class));
+    }
+
+    private static final class TestingInitializer extends AbstractChannelInitializer<NetconfSession> {
+
+        @Override
+        protected void initializeSessionNegotiator(final Channel ch, final Promise<NetconfSession> promise) {
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/AbstractNetconfSessionTest.java b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/AbstractNetconfSessionTest.java
new file mode 100644 (file)
index 0000000..8199963
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+
+import com.google.common.base.Optional;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.channel.ChannelHandler;
+import io.netty.channel.ChannelPipeline;
+import java.util.Collections;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.api.NetconfSession;
+import org.opendaylight.controller.netconf.api.NetconfSessionListener;
+import org.opendaylight.controller.netconf.api.NetconfTerminationReason;
+import org.opendaylight.controller.netconf.nettyutil.handler.NetconfEXICodec;
+import org.opendaylight.controller.netconf.nettyutil.handler.exi.NetconfStartExiMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessage;
+import org.opendaylight.controller.netconf.util.messages.NetconfHelloMessageAdditionalHeader;
+import org.openexi.proc.common.EXIOptions;
+
+public class AbstractNetconfSessionTest {
+
+    @Mock
+    private NetconfSessionListener<NetconfSession> listener;
+    @Mock
+    private Channel channel;
+    @Mock
+    private ChannelPipeline pipeline;
+    private NetconfHelloMessage clientHello;
+
+    @Before
+    public void setUp() throws Exception {
+        MockitoAnnotations.initMocks(this);
+        doNothing().when(listener).onMessage(any(NetconfSession.class), any(NetconfMessage.class));
+        doNothing().when(listener).onSessionUp(any(NetconfSession.class));
+        doNothing().when(listener).onSessionDown(any(NetconfSession.class), any(Exception.class));
+        doNothing().when(listener).onSessionTerminated(any(NetconfSession.class), any(NetconfTerminationReason.class));
+
+        doReturn(mock(ChannelFuture.class)).when(channel).writeAndFlush(any(NetconfMessage.class));
+        doReturn(pipeline).when(channel).pipeline();
+        doReturn(mock(ChannelFuture.class)).when(channel).close();
+
+        doReturn(null).when(pipeline).replace(anyString(), anyString(), any(ChannelHandler.class));
+
+        clientHello = NetconfHelloMessage.createClientHello(Collections.<String>emptySet(), Optional.<NetconfHelloMessageAdditionalHeader>absent());
+    }
+
+    @Test
+    public void testHandleMessage() throws Exception {
+        final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+        testingNetconfSession.handleMessage(clientHello);
+        verify(listener).onMessage(testingNetconfSession, clientHello);
+    }
+
+    @Test
+    public void testSessionUp() throws Exception {
+        final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+        testingNetconfSession.sessionUp();
+        verify(listener).onSessionUp(testingNetconfSession);
+        assertEquals(1L, testingNetconfSession.getSessionId());
+    }
+
+    @Test
+    public void testClose() throws Exception {
+        final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+        testingNetconfSession.sessionUp();
+        testingNetconfSession.close();
+        verify(channel).close();
+        verify(listener).onSessionTerminated(any(NetconfSession.class), any(NetconfTerminationReason.class));
+    }
+
+    @Test
+    public void testReplaceHandlers() throws Exception {
+        final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+        final ChannelHandler mock = mock(ChannelHandler.class);
+        doReturn("handler").when(mock).toString();
+
+        testingNetconfSession.replaceMessageDecoder(mock);
+        verify(pipeline).replace(AbstractChannelInitializer.NETCONF_MESSAGE_DECODER, AbstractChannelInitializer.NETCONF_MESSAGE_DECODER, mock);
+        testingNetconfSession.replaceMessageEncoder(mock);
+        verify(pipeline).replace(AbstractChannelInitializer.NETCONF_MESSAGE_ENCODER, AbstractChannelInitializer.NETCONF_MESSAGE_ENCODER, mock);
+        testingNetconfSession.replaceMessageEncoderAfterNextMessage(mock);
+        verifyNoMoreInteractions(pipeline);
+
+        testingNetconfSession.sendMessage(clientHello);
+        verify(pipeline, times(2)).replace(AbstractChannelInitializer.NETCONF_MESSAGE_ENCODER, AbstractChannelInitializer.NETCONF_MESSAGE_ENCODER, mock);
+    }
+
+    @Test
+    public void testStartExi() throws Exception {
+        TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+        testingNetconfSession = spy(testingNetconfSession);
+
+        testingNetconfSession.startExiCommunication(NetconfStartExiMessage.create(new EXIOptions(), "4"));
+        verify(testingNetconfSession).addExiHandlers(any(NetconfEXICodec.class));
+    }
+
+    @Test
+    public void testEndOfInput() throws Exception {
+        final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+        testingNetconfSession.endOfInput();
+        verifyZeroInteractions(listener);
+        testingNetconfSession.sessionUp();
+        testingNetconfSession.endOfInput();
+        verify(listener).onSessionDown(any(NetconfSession.class), any(Exception.class));
+    }
+
+    @Test
+    public void testSendMessage() throws Exception {
+        final TestingNetconfSession testingNetconfSession = new TestingNetconfSession(listener, channel, 1L);
+        testingNetconfSession.sendMessage(clientHello);
+        verify(channel).writeAndFlush(clientHello);
+    }
+
+    private static class TestingNetconfSession extends AbstractNetconfSession<NetconfSession, NetconfSessionListener<NetconfSession>> {
+
+        protected TestingNetconfSession(final NetconfSessionListener<NetconfSession> sessionListener, final Channel channel, final long sessionId) {
+            super(sessionListener, channel, sessionId);
+        }
+
+        @Override
+        protected NetconfSession thisInstance() {
+            return this;
+        }
+
+        @Override
+        protected void addExiHandlers(final NetconfEXICodec exiCodec) {}
+
+        @Override
+        public void stopExiCommunication() {}
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/NetconfEXIHandlersTest.java b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/NetconfEXIHandlersTest.java
new file mode 100644 (file)
index 0000000..4a8db17
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import com.google.common.collect.Lists;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.openexi.proc.common.EXIOptions;
+import org.openexi.proc.common.EXIOptionsException;
+import org.openexi.sax.Transmogrifier;
+import org.openexi.sax.TransmogrifierException;
+import org.xml.sax.InputSource;
+
+public class NetconfEXIHandlersTest {
+
+    private final String msgAsString = "<netconf-message/>";
+    private NetconfMessageToEXIEncoder netconfMessageToEXIEncoder;
+    private NetconfEXIToMessageDecoder netconfEXIToMessageDecoder;
+    private NetconfMessage msg;
+    private byte[] msgAsExi;
+
+    @Before
+    public void setUp() throws Exception {
+        final NetconfEXICodec codec = new NetconfEXICodec(new EXIOptions());
+        netconfMessageToEXIEncoder = new NetconfMessageToEXIEncoder(codec);
+        netconfEXIToMessageDecoder = new NetconfEXIToMessageDecoder(codec);
+
+        msg = new NetconfMessage(XmlUtil.readXmlToDocument(msgAsString));
+        this.msgAsExi = msgToExi(msgAsString, codec);
+    }
+
+    private byte[] msgToExi(final String msgAsString, final NetconfEXICodec codec) throws EXIOptionsException, TransmogrifierException, IOException {
+        final ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+        final Transmogrifier transmogrifier = codec.getTransmogrifier();
+        transmogrifier.setOutputStream(byteArrayOutputStream);
+        transmogrifier.encode(new InputSource(new ByteArrayInputStream(msgAsString.getBytes())));
+        return byteArrayOutputStream.toByteArray();
+    }
+
+    @Test
+    public void testEncodeDecode() throws Exception {
+        final ByteBuf buffer = Unpooled.buffer();
+        netconfMessageToEXIEncoder.encode(null, msg, buffer);
+        final int exiLength = msgAsExi.length;
+        // the array backing the buffer is roughly 256 bytes long, compare only the relevant subarray
+        assertArrayEquals(msgAsExi, Arrays.copyOfRange(buffer.array(), 0, exiLength));
+
+        // assert that all remaining bytes in the buffer are 0
+        for (int i = exiLength; i < buffer.array().length; i++) {
+            assertEquals((byte)0, buffer.array()[i]);
+        }
+
+        final List<Object> out = Lists.newArrayList();
+        netconfEXIToMessageDecoder.decode(null, buffer, out);
+
+        final Diff diff = XMLUnit.compareXML(msg.getDocument(), ((NetconfMessage) out.get(0)).getDocument());
+        assertTrue(diff.toString(), diff.similar());
+    }
+}
\ No newline at end of file
index f0c0d6341bd00fc752a693325e7b8e264d66a9aa..ac6370685a06f600da72a5e0e362a7010c1d1bd3 100644 (file)
@@ -36,7 +36,7 @@ public class NetconfXMLToHelloMessageDecoderTest {
         assertThat(out.get(0), CoreMatchers.instanceOf(NetconfHelloMessage.class));
         final NetconfHelloMessage hello = (NetconfHelloMessage) out.get(0);
         assertTrue(hello.getAdditionalHeader().isPresent());
-        assertEquals("[tomas;10.0.0.0:10000;tcp;client;]\n", hello.getAdditionalHeader().get().toFormattedString());
+        assertEquals("[tomas;10.0.0.0:10000;tcp;client;]" + System.lineSeparator(), hello.getAdditionalHeader().get().toFormattedString());
         assertThat(XmlUtil.toString(hello.getDocument()), CoreMatchers.containsString("<hello xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\""));
     }
 
diff --git a/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/EXIParametersTest.java b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/EXIParametersTest.java
new file mode 100644 (file)
index 0000000..15ba3b4
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.exi;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.Arrays;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.openexi.proc.common.AlignmentType;
+import org.openexi.proc.common.EXIOptions;
+
+@RunWith(Parameterized.class)
+public class EXIParametersTest {
+
+    @Parameterized.Parameters
+    public static Iterable<Object[]> data() throws Exception {
+        final String noChangeXml =
+                "<start-exi xmlns=\"urn:ietf:params:xml:ns:netconf:exi:1.0\">\n" +
+                "<alignment>bit-packed</alignment>\n" +
+                "</start-exi>\n";
+
+
+        final String fullOptionsXml =
+                "<start-exi xmlns=\"urn:ietf:params:xml:ns:netconf:exi:1.0\">\n" +
+                "<alignment>byte-aligned</alignment>\n" +
+                "<fidelity>\n" +
+                "<comments/>\n" +
+                "<dtd/>\n" +
+                "<lexical-values/>\n" +
+                "<pis/>\n" +
+                "<prefixes/>\n" +
+                "</fidelity>\n" +
+                "</start-exi>\n";
+
+        final EXIOptions fullOptions = new EXIOptions();
+        fullOptions.setAlignmentType(AlignmentType.byteAligned);
+        fullOptions.setPreserveLexicalValues(true);
+        fullOptions.setPreserveDTD(true);
+        fullOptions.setPreserveComments(true);
+        fullOptions.setPreserveNS(true);
+        fullOptions.setPreservePIs(true);
+
+        return Arrays.asList(new Object[][]{
+                {noChangeXml, new EXIOptions()},
+                {fullOptionsXml, fullOptions},
+        });
+    }
+
+    private final String sourceXml;
+    private final EXIOptions exiOptions;
+
+    public EXIParametersTest(final String sourceXml, final EXIOptions exiOptions) {
+        this.sourceXml = sourceXml;
+        this.exiOptions = exiOptions;
+    }
+
+    @Test
+    public void testFromXmlElement() throws Exception {
+        final EXIParameters opts =
+                EXIParameters.fromXmlElement(
+                        XmlElement.fromDomElement(
+                                XmlUtil.readXmlToElement(sourceXml)));
+
+
+        assertEquals(opts.getOptions().getAlignmentType(), exiOptions.getAlignmentType());
+        assertEquals(opts.getOptions().getPreserveComments(), exiOptions.getPreserveComments());
+        assertEquals(opts.getOptions().getPreserveLexicalValues(), exiOptions.getPreserveLexicalValues());
+        assertEquals(opts.getOptions().getPreserveNS(), exiOptions.getPreserveNS());
+        assertEquals(opts.getOptions().getPreserveDTD(), exiOptions.getPreserveDTD());
+        assertEquals(opts.getOptions().getPreservePIs(), exiOptions.getPreservePIs());
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/NetconfStartExiMessageTest.java b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/exi/NetconfStartExiMessageTest.java
new file mode 100644 (file)
index 0000000..47abe96
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.exi;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.Arrays;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.openexi.proc.common.AlignmentType;
+import org.openexi.proc.common.EXIOptions;
+
+@RunWith(Parameterized.class)
+public class NetconfStartExiMessageTest {
+
+    @Parameterized.Parameters
+    public static Iterable<Object[]> data() throws Exception {
+        final String noChangeXml = "<rpc xmlns:ns0=\"urn:ietf:params:xml:ns:netconf:base:1.0\" ns0:message-id=\"id\" xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+                "<start-exi xmlns=\"urn:ietf:params:xml:ns:netconf:exi:1.0\">\n" +
+                "<alignment>bit-packed</alignment>\n" +
+                "</start-exi>\n" +
+                "</rpc>";
+
+
+        final String fullOptionsXml = "<rpc xmlns:ns0=\"urn:ietf:params:xml:ns:netconf:base:1.0\" ns0:message-id=\"id\" xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\n" +
+                "<start-exi xmlns=\"urn:ietf:params:xml:ns:netconf:exi:1.0\">\n" +
+                "<alignment>byte-aligned</alignment>\n" +
+                "<fidelity>\n" +
+                "<comments/>\n" +
+                "<dtd/>\n" +
+                "<lexical-values/>\n" +
+                "<pis/>\n" +
+                "<prefixes/>\n" +
+                "</fidelity>\n" +
+                "</start-exi>\n" +
+                "</rpc>";
+
+        final EXIOptions fullOptions = new EXIOptions();
+        fullOptions.setAlignmentType(AlignmentType.byteAligned);
+        fullOptions.setPreserveLexicalValues(true);
+        fullOptions.setPreserveDTD(true);
+        fullOptions.setPreserveComments(true);
+        fullOptions.setPreserveNS(true);
+        fullOptions.setPreservePIs(true);
+
+        return Arrays.asList(new Object[][]{
+                {noChangeXml, new EXIOptions()},
+                {fullOptionsXml, fullOptions},
+        });
+    }
+
+    private final String controlXml;
+    private final EXIOptions exiOptions;
+
+    public NetconfStartExiMessageTest(final String controlXml, final EXIOptions exiOptions) {
+        this.controlXml = controlXml;
+        this.exiOptions = exiOptions;
+    }
+
+    @Test
+    public void testCreate() throws Exception {
+        final NetconfStartExiMessage startExiMessage = NetconfStartExiMessage.create(exiOptions, "id");
+
+        XMLUnit.setIgnoreWhitespace(true);
+        XMLUnit.setIgnoreAttributeOrder(true);
+        final Diff diff = XMLUnit.compareXML(XMLUnit.buildControlDocument(controlXml), startExiMessage.getDocument());
+        assertTrue(diff.toString(), diff.similar());
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandlerTest.java b/opendaylight/netconf/netconf-netty-util/src/test/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandlerTest.java
new file mode 100644 (file)
index 0000000..223f2c7
--- /dev/null
@@ -0,0 +1,625 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.nettyutil.handler.ssh.client;
+
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Matchers.anyObject;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.verifyZeroInteractions;
+
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.SettableFuture;
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
+import java.io.IOException;
+import java.net.SocketAddress;
+import java.nio.channels.WritePendingException;
+import org.apache.sshd.ClientChannel;
+import org.apache.sshd.ClientSession;
+import org.apache.sshd.SshClient;
+import org.apache.sshd.client.channel.ChannelSubsystem;
+import org.apache.sshd.client.future.AuthFuture;
+import org.apache.sshd.client.future.ConnectFuture;
+import org.apache.sshd.client.future.OpenFuture;
+import org.apache.sshd.common.future.CloseFuture;
+import org.apache.sshd.common.future.SshFuture;
+import org.apache.sshd.common.future.SshFutureListener;
+import org.apache.sshd.common.io.IoInputStream;
+import org.apache.sshd.common.io.IoOutputStream;
+import org.apache.sshd.common.io.IoReadFuture;
+import org.apache.sshd.common.io.IoWriteFuture;
+import org.apache.sshd.common.util.Buffer;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Matchers;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.AuthenticationHandler;
+
+public class AsyncSshHandlerTest {
+
+    @Mock
+    private SshClient sshClient;
+    @Mock
+    private AuthenticationHandler authHandler;
+    @Mock
+    private ChannelHandlerContext ctx;
+    @Mock
+    private Channel channel;
+    @Mock
+    private SocketAddress remoteAddress;
+    @Mock
+    private SocketAddress localAddress;
+
+    private AsyncSshHandler asyncSshHandler;
+
+    private SshFutureListener<ConnectFuture> sshConnectListener;
+    private SshFutureListener<AuthFuture> sshAuthListener;
+    private SshFutureListener<OpenFuture> sshChannelOpenListener;
+
+    private ChannelPromise promise;
+
+    @Before
+    public void setUp() throws Exception {
+        MockitoAnnotations.initMocks(this);
+        stubAuth();
+        stubSshClient();
+        stubChannel();
+        stubCtx();
+        stubRemoteAddress();
+
+        promise = getMockedPromise();
+
+        asyncSshHandler = new AsyncSshHandler(authHandler, sshClient);
+    }
+
+    @After
+    public void tearDown() throws Exception {
+        sshConnectListener = null;
+        sshAuthListener = null;
+        sshChannelOpenListener = null;
+        promise = null;
+        asyncSshHandler.close(ctx, getMockedPromise());
+    }
+
+    private void stubAuth() throws IOException {
+        doReturn("usr").when(authHandler).getUsername();
+
+        final AuthFuture authFuture = mock(AuthFuture.class);
+        Futures.addCallback(stubAddListener(authFuture), new SuccessFutureListener<AuthFuture>() {
+            @Override
+            public void onSuccess(final SshFutureListener<AuthFuture> result) {
+                sshAuthListener = result;
+            }
+        });
+        doReturn(authFuture).when(authHandler).authenticate(any(ClientSession.class));
+    }
+
+    @SuppressWarnings("unchecked")
+    private <T extends SshFuture<T>> ListenableFuture<SshFutureListener<T>> stubAddListener(final T future) {
+        final SettableFuture<SshFutureListener<T>> listenerSettableFuture = SettableFuture.create();
+
+        doAnswer(new Answer() {
+            @Override
+            public Object answer(final InvocationOnMock invocation) throws Throwable {
+                listenerSettableFuture.set((SshFutureListener<T>) invocation.getArguments()[0]);
+                return null;
+            }
+        }).when(future).addListener(any(SshFutureListener.class));
+
+        return listenerSettableFuture;
+    }
+
+    private void stubRemoteAddress() {
+        doReturn("remote").when(remoteAddress).toString();
+    }
+
+    private void stubCtx() {
+        doReturn(channel).when(ctx).channel();
+        doReturn(ctx).when(ctx).fireChannelActive();
+        doReturn(ctx).when(ctx).fireChannelInactive();
+        doReturn(ctx).when(ctx).fireChannelRead(anyObject());
+        doReturn(getMockedPromise()).when(ctx).newPromise();
+    }
+
+    private void stubChannel() {
+        doReturn("channel").when(channel).toString();
+    }
+
+    private void stubSshClient() {
+        doNothing().when(sshClient).start();
+        final ConnectFuture connectFuture = mock(ConnectFuture.class);
+        Futures.addCallback(stubAddListener(connectFuture), new SuccessFutureListener<ConnectFuture>() {
+            @Override
+            public void onSuccess(final SshFutureListener<ConnectFuture> result) {
+                sshConnectListener = result;
+            }
+        });
+        doReturn(connectFuture).when(sshClient).connect("usr", remoteAddress);
+    }
+
+    @Test
+    public void testConnectSuccess() throws Exception {
+        asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+        final IoInputStream asyncOut = getMockedIoInputStream();
+        final IoOutputStream asyncIn = getMockedIoOutputStream();
+        final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+        final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+        final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+        sshConnectListener.operationComplete(connectFuture);
+        sshAuthListener.operationComplete(getSuccessAuthFuture());
+        sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+        verify(subsystemChannel).setStreaming(ClientChannel.Streaming.Async);
+
+        verify(promise).setSuccess();
+        verifyNoMoreInteractions(promise);
+        verify(ctx).fireChannelActive();
+    }
+
+    @Test
+    public void testRead() throws Exception {
+        asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+        final IoInputStream asyncOut = getMockedIoInputStream();
+        final IoOutputStream asyncIn = getMockedIoOutputStream();
+        final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+        final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+        final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+        sshConnectListener.operationComplete(connectFuture);
+        sshAuthListener.operationComplete(getSuccessAuthFuture());
+        sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+        verify(ctx).fireChannelRead(any(ByteBuf.class));
+    }
+
+    @Test
+    public void testReadClosed() throws Exception {
+        asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+        final IoInputStream asyncOut = getMockedIoInputStream();
+        final IoReadFuture mockedReadFuture = asyncOut.read(null);
+
+        Futures.addCallback(stubAddListener(mockedReadFuture), new SuccessFutureListener<IoReadFuture>() {
+            @Override
+            public void onSuccess(final SshFutureListener<IoReadFuture> result) {
+                doReturn(new IllegalStateException()).when(mockedReadFuture).getException();
+                doReturn(mockedReadFuture).when(mockedReadFuture).removeListener(Matchers.<SshFutureListener<IoReadFuture>>any());
+                doReturn(true).when(asyncOut).isClosing();
+                doReturn(true).when(asyncOut).isClosed();
+                result.operationComplete(mockedReadFuture);
+            }
+        });
+
+        final IoOutputStream asyncIn = getMockedIoOutputStream();
+        final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+        final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+        final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+        sshConnectListener.operationComplete(connectFuture);
+        sshAuthListener.operationComplete(getSuccessAuthFuture());
+        sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+        verify(ctx).fireChannelInactive();
+    }
+
+    @Test
+    public void testReadFail() throws Exception {
+        asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+        final IoInputStream asyncOut = getMockedIoInputStream();
+        final IoReadFuture mockedReadFuture = asyncOut.read(null);
+
+        Futures.addCallback(stubAddListener(mockedReadFuture), new SuccessFutureListener<IoReadFuture>() {
+            @Override
+            public void onSuccess(final SshFutureListener<IoReadFuture> result) {
+                doReturn(new IllegalStateException()).when(mockedReadFuture).getException();
+                doReturn(mockedReadFuture).when(mockedReadFuture).removeListener(Matchers.<SshFutureListener<IoReadFuture>>any());
+                result.operationComplete(mockedReadFuture);
+            }
+        });
+
+        final IoOutputStream asyncIn = getMockedIoOutputStream();
+        final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+        final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+        final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+        sshConnectListener.operationComplete(connectFuture);
+        sshAuthListener.operationComplete(getSuccessAuthFuture());
+        sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+        verify(ctx).fireChannelInactive();
+    }
+
+    @Test
+    public void testWrite() throws Exception {
+        asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+        final IoInputStream asyncOut = getMockedIoInputStream();
+        final IoOutputStream asyncIn = getMockedIoOutputStream();
+        final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+        final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+        final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+        sshConnectListener.operationComplete(connectFuture);
+        sshAuthListener.operationComplete(getSuccessAuthFuture());
+        sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+        final ChannelPromise writePromise = getMockedPromise();
+        asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5}), writePromise);
+
+        verify(writePromise).setSuccess();
+    }
+
+    @Test
+    public void testWriteClosed() throws Exception {
+        asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+        final IoInputStream asyncOut = getMockedIoInputStream();
+        final IoOutputStream asyncIn = getMockedIoOutputStream();
+
+        final IoWriteFuture ioWriteFuture = asyncIn.write(null);
+
+        Futures.addCallback(stubAddListener(ioWriteFuture), new SuccessFutureListener<IoWriteFuture>() {
+            @Override
+            public void onSuccess(final SshFutureListener<IoWriteFuture> result) {
+                doReturn(false).when(ioWriteFuture).isWritten();
+                doReturn(new IllegalStateException()).when(ioWriteFuture).getException();
+                doReturn(true).when(asyncIn).isClosing();
+                doReturn(true).when(asyncIn).isClosed();
+                result.operationComplete(ioWriteFuture);
+            }
+        });
+
+        final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+        final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+        final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+        sshConnectListener.operationComplete(connectFuture);
+        sshAuthListener.operationComplete(getSuccessAuthFuture());
+        sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+        final ChannelPromise writePromise = getMockedPromise();
+        asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0,1,2,3,4,5}), writePromise);
+
+        verify(writePromise).setFailure(any(Throwable.class));
+    }
+
+    @Test
+    public void testWritePendingOne() throws Exception {
+        asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+        final IoInputStream asyncOut = getMockedIoInputStream();
+        final IoOutputStream asyncIn = getMockedIoOutputStream();
+        final IoWriteFuture ioWriteFuture = asyncIn.write(null);
+
+        final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+        final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+        final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+        sshConnectListener.operationComplete(connectFuture);
+        sshAuthListener.operationComplete(getSuccessAuthFuture());
+        sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+        final ChannelPromise firstWritePromise = getMockedPromise();
+
+        // intercept the listener for the first write so we can complete it later, simulating a pending first write
+        final ListenableFuture<SshFutureListener<IoWriteFuture>> firstWriteListenerFuture = stubAddListener(ioWriteFuture);
+        asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0,1,2,3,4,5}), firstWritePromise);
+        final SshFutureListener<IoWriteFuture> firstWriteListener = firstWriteListenerFuture.get();
+        // intercept the second listener, which the pending write uses to find out when the pending state has ended
+        final ListenableFuture<SshFutureListener<IoWriteFuture>> pendingListener = stubAddListener(ioWriteFuture);
+
+        final ChannelPromise secondWritePromise = getMockedPromise();
+        // now make every write throw WritePendingException
+        doThrow(org.apache.sshd.common.io.WritePendingException.class).when(asyncIn).write(any(Buffer.class));
+        asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5}), secondWritePromise);
+
+        doReturn(ioWriteFuture).when(asyncIn).write(any(Buffer.class));
+
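+        // while the first write is still pending, neither write promise should have been touched yet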
+        verifyZeroInteractions(firstWritePromise, secondWritePromise);
+
+        // make first write stop pending
+        firstWriteListener.operationComplete(ioWriteFuture);
+        // intercept the third listener, the regular listener for the second write that determines its success or failure
+        final ListenableFuture<SshFutureListener<IoWriteFuture>> afterPendingListener = stubAddListener(ioWriteFuture);
+
+        // notify the listener for the second write that the pending state has ended
+        pendingListener.get().operationComplete(ioWriteFuture);
+        // notify the third listener (the regular listener for the second write) that the second write succeeded
+        afterPendingListener.get().operationComplete(ioWriteFuture);
+
+        // verify that both write promises completed successfully
+        verify(firstWritePromise).setSuccess();
+        verify(secondWritePromise).setSuccess();
+    }
+
+    @Test
+    public void testWritePendingMax() throws Exception {
+        asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+        final IoInputStream asyncOut = getMockedIoInputStream();
+        final IoOutputStream asyncIn = getMockedIoOutputStream();
+        final IoWriteFuture ioWriteFuture = asyncIn.write(null);
+
+        final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+        final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+        final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+        sshConnectListener.operationComplete(connectFuture);
+        sshAuthListener.operationComplete(getSuccessAuthFuture());
+        sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+        final ChannelPromise firstWritePromise = getMockedPromise();
+
+        // intercept the listener for the first write so we can complete it later, simulating a pending first write
+        final ListenableFuture<SshFutureListener<IoWriteFuture>> firstWriteListenerFuture = stubAddListener(ioWriteFuture);
+        asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0,1,2,3,4,5}), firstWritePromise);
+
+        final ChannelPromise secondWritePromise = getMockedPromise();
+        // now make every write throw WritePendingException
+        doThrow(org.apache.sshd.common.io.WritePendingException.class).when(asyncIn).write(any(Buffer.class));
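+        // flood the handler with writes while the stream keeps reporting WritePendingException; once the pending-write limit is exceeded the channel should be deactivated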
+        for (int i = 0; i < 1000; i++) {
+            asyncSshHandler.write(ctx, Unpooled.copiedBuffer(new byte[]{0, 1, 2, 3, 4, 5}), secondWritePromise);
+        }
+
+        verify(ctx).fireChannelInactive();
+    }
+
+    @Test
+    public void testDisconnect() throws Exception {
+        asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+        final IoInputStream asyncOut = getMockedIoInputStream();
+        final IoOutputStream asyncIn = getMockedIoOutputStream();
+        final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+        final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+        final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+        sshConnectListener.operationComplete(connectFuture);
+        sshAuthListener.operationComplete(getSuccessAuthFuture());
+        sshChannelOpenListener.operationComplete(getSuccessOpenFuture());
+
+        final ChannelPromise disconnectPromise = getMockedPromise();
+        asyncSshHandler.disconnect(ctx, disconnectPromise);
+
+        verify(sshSession).close(anyBoolean());
+        verify(disconnectPromise).setSuccess();
+        verify(ctx).fireChannelInactive();
+    }
+
+    private OpenFuture getSuccessOpenFuture() {
+        final OpenFuture openFuture = mock(OpenFuture.class);
+        doReturn(true).when(openFuture).isOpened();
+        return openFuture;
+    }
+
+    private AuthFuture getSuccessAuthFuture() {
+        final AuthFuture authFuture = mock(AuthFuture.class);
+        doReturn(true).when(authFuture).isSuccess();
+        return authFuture;
+    }
+
+    private ConnectFuture getSuccessConnectFuture(final ClientSession sshSession) {
+        final ConnectFuture connectFuture = mock(ConnectFuture.class);
+        doReturn(true).when(connectFuture).isConnected();
+
+        doReturn(sshSession).when(connectFuture).getSession();
+        return connectFuture;
+    }
+
+    private ClientSession getMockedSshSession(final ChannelSubsystem subsystemChannel) throws IOException {
+        final ClientSession sshSession = mock(ClientSession.class);
+
+        doReturn("sshSession").when(sshSession).toString();
+        doReturn("serverVersion").when(sshSession).getServerVersion();
+        doReturn(false).when(sshSession).isClosed();
+        doReturn(false).when(sshSession).isClosing();
+        final CloseFuture closeFuture = mock(CloseFuture.class);
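+        // any listener added to the close future sees it as already closed and is notified immediately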
+        Futures.addCallback(stubAddListener(closeFuture), new SuccessFutureListener<CloseFuture>() {
+            @Override
+            public void onSuccess(final SshFutureListener<CloseFuture> result) {
+                doReturn(true).when(closeFuture).isClosed();
+                result.operationComplete(closeFuture);
+            }
+        });
+        doReturn(closeFuture).when(sshSession).close(false);
+
+        doReturn(subsystemChannel).when(sshSession).createSubsystemChannel(anyString());
+
+        return sshSession;
+    }
+
+    private ChannelSubsystem getMockedSubsystemChannel(final IoInputStream asyncOut, final IoOutputStream asyncIn) throws IOException {
+        final ChannelSubsystem subsystemChannel = mock(ChannelSubsystem.class);
+        doNothing().when(subsystemChannel).setStreaming(any(ClientChannel.Streaming.class));
+        final OpenFuture openFuture = mock(OpenFuture.class);
+
+        Futures.addCallback(stubAddListener(openFuture), new SuccessFutureListener<OpenFuture>() {
+            @Override
+            public void onSuccess(final SshFutureListener<OpenFuture> result) {
+                sshChannelOpenListener = result;
+            }
+        });
+
+        doReturn(asyncOut).when(subsystemChannel).getAsyncOut();
+
+        doReturn(openFuture).when(subsystemChannel).open();
+        doReturn(asyncIn).when(subsystemChannel).getAsyncIn();
+        return subsystemChannel;
+    }
+
+    private IoOutputStream getMockedIoOutputStream() {
+        final IoOutputStream mock = mock(IoOutputStream.class);
+        final IoWriteFuture ioWriteFuture = mock(IoWriteFuture.class);
+        doReturn(ioWriteFuture).when(ioWriteFuture).addListener(Matchers.<SshFutureListener<IoWriteFuture>>any());
+        doReturn(true).when(ioWriteFuture).isWritten();
+
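+        // any listener added to the write future is notified immediately with a successful write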
+        Futures.addCallback(stubAddListener(ioWriteFuture), new SuccessFutureListener<IoWriteFuture>() {
+            @Override
+            public void onSuccess(final SshFutureListener<IoWriteFuture> result) {
+                result.operationComplete(ioWriteFuture);
+            }
+        });
+
+        doReturn(ioWriteFuture).when(mock).write(any(Buffer.class));
+        doReturn(false).when(mock).isClosed();
+        doReturn(false).when(mock).isClosing();
+        return mock;
+    }
+
+    private IoInputStream getMockedIoInputStream() {
+        final IoInputStream mock = mock(IoInputStream.class);
+        final IoReadFuture ioReadFuture = mock(IoReadFuture.class);
+        doReturn(null).when(ioReadFuture).getException();
+        doReturn(ioReadFuture).when(ioReadFuture).removeListener(Matchers.<SshFutureListener<IoReadFuture>>any());
+        doReturn(5).when(ioReadFuture).getRead();
+        doReturn(new Buffer(new byte[]{0, 1, 2, 3, 4})).when(ioReadFuture).getBuffer();
+        doReturn(ioReadFuture).when(ioReadFuture).addListener(Matchers.<SshFutureListener<IoReadFuture>>any());
+
+        // every read completes successfully and returns the stubbed buffer
+        Futures.addCallback(stubAddListener(ioReadFuture), new SuccessFutureListener<IoReadFuture>() {
+            @Override
+            public void onSuccess(final SshFutureListener<IoReadFuture> result) {
+                result.operationComplete(ioReadFuture);
+            }
+        });
+
+        doReturn(ioReadFuture).when(mock).read(any(Buffer.class));
+        doReturn(false).when(mock).isClosed();
+        doReturn(false).when(mock).isClosing();
+        return mock;
+    }
+
+    @Test
+    public void testConnectFailOpenChannel() throws Exception {
+        asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+        final IoInputStream asyncOut = getMockedIoInputStream();
+        final IoOutputStream asyncIn = getMockedIoOutputStream();
+        final ChannelSubsystem subsystemChannel = getMockedSubsystemChannel(asyncOut, asyncIn);
+        final ClientSession sshSession = getMockedSshSession(subsystemChannel);
+        final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+        sshConnectListener.operationComplete(connectFuture);
+
+        sshAuthListener.operationComplete(getSuccessAuthFuture());
+
+        verify(subsystemChannel).setStreaming(ClientChannel.Streaming.Async);
+
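+        // completing the open listener with a failed future should fail the connect promise exactly once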
+        try {
+            sshChannelOpenListener.operationComplete(getFailedOpenFuture());
+            fail("Exception expected");
+        } catch (final Exception e) {
+            verify(promise).setFailure(any(Throwable.class));
+            verifyNoMoreInteractions(promise);
+            // TODO should ctx.channelInactive be called if we throw exception ?
+        }
+    }
+
+    @Test
+    public void testConnectFailAuth() throws Exception {
+        asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+        final ClientSession sshSession = mock(ClientSession.class);
+        doReturn(true).when(sshSession).isClosed();
+        final ConnectFuture connectFuture = getSuccessConnectFuture(sshSession);
+
+        sshConnectListener.operationComplete(connectFuture);
+
+        final AuthFuture authFuture = getFailedAuthFuture();
+
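+        // completing the auth listener with a failed future should fail the connect promise exactly once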
+        try {
+            sshAuthListener.operationComplete(authFuture);
+            fail("Exception expected");
+        } catch (final Exception e) {
+            verify(promise).setFailure(any(Throwable.class));
+            verifyNoMoreInteractions(promise);
+            // TODO should ctx.channelInactive be called ?
+        }
+    }
+
+    private AuthFuture getFailedAuthFuture() {
+        final AuthFuture authFuture = mock(AuthFuture.class);
+        doReturn(false).when(authFuture).isSuccess();
+        doReturn(new IllegalStateException()).when(authFuture).getException();
+        return authFuture;
+    }
+
+    private OpenFuture getFailedOpenFuture() {
+        final OpenFuture openFuture = mock(OpenFuture.class);
+        doReturn(false).when(openFuture).isOpened();
+        doReturn(new IllegalStateException()).when(openFuture).getException();
+        return openFuture;
+    }
+
+    @Test
+    public void testConnectFail() throws Exception {
+        asyncSshHandler.connect(ctx, remoteAddress, localAddress, promise);
+
+        final ConnectFuture connectFuture = getFailedConnectFuture();
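+        // a failed connect future should fail the connect promise exactly once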
+        try {
+            sshConnectListener.operationComplete(connectFuture);
+            fail("Exception expected");
+        } catch (final Exception e) {
+            verify(promise).setFailure(any(Throwable.class));
+            verifyNoMoreInteractions(promise);
+            // TODO should ctx.channelInactive be called ?
+        }
+    }
+
+    private ConnectFuture getFailedConnectFuture() {
+        final ConnectFuture connectFuture = mock(ConnectFuture.class);
+        doReturn(false).when(connectFuture).isConnected();
+        doReturn(new IllegalStateException()).when(connectFuture).getException();
+        return connectFuture;
+    }
+
+    private ChannelPromise getMockedPromise() {
+        final ChannelPromise promise = mock(ChannelPromise.class);
+        doReturn(promise).when(promise).setSuccess();
+        doReturn(promise).when(promise).setFailure(any(Throwable.class));
+        return promise;
+    }
+
+    private static abstract class SuccessFutureListener<T extends SshFuture<T>> implements FutureCallback<SshFutureListener<T>> {
+
+        @Override
+        public abstract void onSuccess(final SshFutureListener<T> result);
+
+        @Override
+        public void onFailure(final Throwable t) {
+            throw new RuntimeException(t);
+        }
+    }
+}
index c686bcbc66fcb849da3f357bdd3bea2df7f62f2e..0d0f95c3cb4d0856ba56c470f0890eb51371caa1 100644 (file)
@@ -123,6 +123,7 @@ public class NetconfSSHActivator implements BundleActivator {
             final AuthProvider authService = bundleContext.getService(reference);
             final Integer newServicePreference = getPreference(reference);
             if(isBetter(newServicePreference)) {
+                maxPreference = newServicePreference;
                 server.setAuthProvider(authService);
                 if(sshThread == null) {
                     sshThread = runNetconfSshThread(server);
index ce1400bbcb24a5d455ad179a93da6098530c5790..eb2b644cbca1fbaa5f98c2b704d3893ac55e9fa4 100644 (file)
@@ -67,7 +67,9 @@ public class SSHTest {
         netconfSSHServer.setAuthProvider(authProvider);
 
         InetSocketAddress address = netconfSSHServer.getLocalSocketAddress();
-        final EchoClientHandler echoClientHandler = connectClient(address);
+
+        final EchoClientHandler echoClientHandler = connectClient(new InetSocketAddress("localhost", address.getPort()));
+
         Stopwatch stopwatch = new Stopwatch().start();
         while(echoClientHandler.isConnected() == false && stopwatch.elapsed(TimeUnit.SECONDS) < 5) {
             Thread.sleep(100);
index 2cd5b19bd12a4c44a222a43acd8004df82debef1..600baa743169744a9e2019ef116ee98f4a194c4e 100644 (file)
@@ -93,7 +93,7 @@ public class NetconfDeviceSimulator implements Closeable {
         this.hashedWheelTimer = hashedWheelTimer;
     }
 
-    private NetconfServerDispatcher createDispatcher(final Map<ModuleBuilder, String> moduleBuilders, final boolean exi) {
+    private NetconfServerDispatcher createDispatcher(final Map<ModuleBuilder, String> moduleBuilders, final boolean exi, final int generateConfigsTimeout) {
 
         final Set<Capability> capabilities = Sets.newHashSet(Collections2.transform(moduleBuilders.keySet(), new Function<ModuleBuilder, Capability>() {
             @Override
@@ -115,7 +115,7 @@ public class NetconfDeviceSimulator implements Closeable {
                 : Sets.newHashSet(XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_0, XmlNetconfConstants.URN_IETF_PARAMS_NETCONF_BASE_1_1);
 
         final NetconfServerSessionNegotiatorFactory serverNegotiatorFactory = new NetconfServerSessionNegotiatorFactory(
-                hashedWheelTimer, simulatedOperationProvider, idProvider, CONNECTION_TIMEOUT_MILLIS, commitNotifier, new LoggingMonitoringService(), serverCapabilities);
+                hashedWheelTimer, simulatedOperationProvider, idProvider, generateConfigsTimeout, commitNotifier, new LoggingMonitoringService(), serverCapabilities);
 
         final NetconfServerDispatcher.ServerChannelInitializer serverChannelInitializer = new NetconfServerDispatcher.ServerChannelInitializer(
                 serverNegotiatorFactory);
@@ -153,7 +153,7 @@ public class NetconfDeviceSimulator implements Closeable {
     public List<Integer> start(final Main.Params params) {
         final Map<ModuleBuilder, String> moduleBuilders = parseSchemasToModuleBuilders(params);
 
-        final NetconfServerDispatcher dispatcher = createDispatcher(moduleBuilders, params.exi);
+        final NetconfServerDispatcher dispatcher = createDispatcher(moduleBuilders, params.exi, params.generateConfigsTimeout);
 
         int currentPort = params.startingPort;
 
index d314c31b4fcd6288821109046e59cf1b20fd7ef8..da5e2090ff4cfb70d0c253ad339154dccd9ac107 100644 (file)
@@ -71,7 +71,7 @@ public class AuthProviderImpl implements AuthProvider {
     }
 
     @VisibleForTesting
-    void setNullableUserManager(final IUserManager nullableUserManager) {
+    synchronized void setNullableUserManager(final IUserManager nullableUserManager) {
         this.nullableUserManager = nullableUserManager;
     }
 }
index df4d389705b29eda03cb3e88a553ba14902720f3..bed58beb0f3494a6914986c339af55611c3572b4 100644 (file)
       <artifactId>xmlunit</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>mockito-configuration</artifactId>
+    </dependency>
   </dependencies>
 
   <build>
index 25e0f7926574c3168bc013cff865ec2f6a8a33db..b6f5854aa331140cf21c1de433c5f4a9263e6201 100644 (file)
@@ -116,8 +116,8 @@ public abstract class AbstractNetconfOperation implements NetconfOperation {
             rpcReply.appendChild(responseNS);
         }
 
-        for (String attrName : attributes.keySet()) {
-            rpcReply.setAttributeNode((Attr) document.importNode(attributes.get(attrName), true));
+        for (Attr attribute : attributes.values()) {
+            rpcReply.setAttributeNode((Attr) document.importNode(attribute, true));
         }
         document.appendChild(rpcReply);
         return document;
index 33934d10ba1c8ec9db8b29fe2135eda9cdcf499b..15223cb60ba994472cdedc939a6586f569471593 100644 (file)
@@ -64,10 +64,12 @@ public final class NetconfHelloMessage extends NetconfMessage {
         Document doc = XmlUtil.newDocument();
         Element helloElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
                 HELLO_TAG);
-        Element capabilitiesElement = doc.createElement(XmlNetconfConstants.CAPABILITIES);
+        Element capabilitiesElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+                XmlNetconfConstants.CAPABILITIES);
 
         for (String capability : Sets.newHashSet(capabilities)) {
-            Element capElement = doc.createElement(XmlNetconfConstants.CAPABILITY);
+            Element capElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+                    XmlNetconfConstants.CAPABILITY);
             capElement.setTextContent(capability);
             capabilitiesElement.appendChild(capElement);
         }
@@ -80,7 +82,8 @@ public final class NetconfHelloMessage extends NetconfMessage {
 
     public static NetconfHelloMessage createServerHello(Set<String> capabilities, long sessionId) throws NetconfDocumentedException {
         Document doc = createHelloMessageDoc(capabilities);
-        Element sessionIdElement = doc.createElement(XmlNetconfConstants.SESSION_ID);
+        Element sessionIdElement = doc.createElementNS(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0,
+                XmlNetconfConstants.SESSION_ID);
         sessionIdElement.setTextContent(Long.toString(sessionId));
         doc.getDocumentElement().appendChild(sessionIdElement);
         return new NetconfHelloMessage(doc);
index 3e8040ad8a1d87e916415795e08e38814963537b..c532b7f9a6f235a0eebc7dc4a53bc10f2208dbc4 100644 (file)
@@ -10,7 +10,9 @@ package org.opendaylight.controller.netconf.util.messages;
 
 import com.google.common.base.Function;
 import com.google.common.collect.Collections2;
-
+import java.util.Collection;
+import java.util.List;
+import javax.annotation.Nonnull;
 import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
 import org.opendaylight.controller.netconf.api.NetconfMessage;
 import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
@@ -19,11 +21,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 
-import javax.annotation.Nullable;
-
-import java.util.Collection;
-import java.util.List;
-
 public final class NetconfMessageUtil {
 
     private static final Logger logger = LoggerFactory.getLogger(NetconfMessageUtil.class);
@@ -67,9 +64,8 @@ public final class NetconfMessageUtil {
         List<XmlElement> caps = capabilitiesElement.getChildElements(XmlNetconfConstants.CAPABILITY);
         return Collections2.transform(caps, new Function<XmlElement, String>() {
 
-            @Nullable
             @Override
-            public String apply(@Nullable XmlElement input) {
+            public String apply(@Nonnull XmlElement input) {
                 // Trim possible leading/tailing whitespace
                 try {
                     return input.getTextContent().trim();
index 333fea3493172286fdba2c807eff105760741411..c77e0d7da25dc112e54489fbae42bf317a3f2b3f 100644 (file)
@@ -53,29 +53,6 @@ public final class NetconfConfigUtil {
         }
     }
 
-    /**
-     * Get extracted address or default.
-     *
-     * @throws java.lang.IllegalStateException if neither address is present.
-     */
-    private static InetSocketAddress getNetconfAddress(final InetSocketAddress defaultAddress, Optional<InetSocketAddress> extractedAddress, InfixProp infix) {
-        InetSocketAddress inetSocketAddress;
-
-        if (extractedAddress.isPresent() == false) {
-            logger.debug("Netconf {} address not found, falling back to default {}", infix, defaultAddress);
-
-            if (defaultAddress == null) {
-                logger.warn("Netconf {} address not found, default address not provided", infix);
-                throw new IllegalStateException("Netconf " + infix + " address not found, default address not provided");
-            }
-            inetSocketAddress = defaultAddress;
-        } else {
-            inetSocketAddress = extractedAddress.get();
-        }
-
-        return inetSocketAddress;
-    }
-
     public static String getPrivateKeyPath(final BundleContext context) {
         return getPropertyValue(context, getPrivateKeyKey());
     }
index 4e3a66b7ec5f7edfe0399bcab74e6a1843ed0449..78efe7e9723154bd218bafa5d248af0329fb567d 100644 (file)
@@ -20,7 +20,6 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import javax.annotation.Nullable;
 import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
 import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
 import org.opendaylight.controller.netconf.util.exception.UnexpectedElementException;
@@ -204,7 +203,7 @@ public final class XmlElement {
         return Lists.newArrayList(Collections2.filter(getChildElementsWithinNamespace(namespace),
                 new Predicate<XmlElement>() {
                     @Override
-                    public boolean apply(@Nullable XmlElement xmlElement) {
+                    public boolean apply(XmlElement xmlElement) {
                         return xmlElement.getName().equals(childName);
                     }
                 }));
@@ -298,7 +297,7 @@ public final class XmlElement {
         List<XmlElement> children = getChildElementsWithinNamespace(namespace);
         children = Lists.newArrayList(Collections2.filter(children, new Predicate<XmlElement>() {
             @Override
-            public boolean apply(@Nullable XmlElement xmlElement) {
+            public boolean apply(XmlElement xmlElement) {
                 return xmlElement.getName().equals(childName);
             }
         }));
@@ -436,7 +435,7 @@ public final class XmlElement {
         List<XmlElement> children = getChildElementsWithinNamespace(getNamespace());
         return Lists.newArrayList(Collections2.filter(children, new Predicate<XmlElement>() {
             @Override
-            public boolean apply(@Nullable XmlElement xmlElement) {
+            public boolean apply(XmlElement xmlElement) {
                 return xmlElement.getName().equals(childName);
             }
         }));
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/CloseableUtilTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/CloseableUtilTest.java
new file mode 100644 (file)
index 0000000..8d41ad7
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+
+import com.google.common.collect.Lists;
+import org.junit.Test;
+
+public class CloseableUtilTest {
+
+    @Test
+    public void testCloseAllFail() throws Exception {
+        final AutoCloseable failingCloseable = new AutoCloseable() {
+            @Override
+            public void close() throws Exception {
+                throw new RuntimeException("testing failing close");
+            }
+        };
+
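+        // both closeables fail; closeAll is expected to rethrow one exception and attach the other as suppressed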
+        try {
+            CloseableUtil.closeAll(Lists.newArrayList(failingCloseable, failingCloseable));
+            fail("Exception with suppressed should be thrown");
+        } catch (final RuntimeException e) {
+            assertEquals(1, e.getSuppressed().length);
+        }
+    }
+
+    @Test
+    public void testCloseAll() throws Exception {
+        final AutoCloseable failingCloseable = mock(AutoCloseable.class);
+        doNothing().when(failingCloseable).close();
+        CloseableUtil.closeAll(Lists.newArrayList(failingCloseable, failingCloseable));
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractLastNetconfOperationTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractLastNetconfOperationTest.java
new file mode 100644 (file)
index 0000000..62633dd
--- /dev/null
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+public class AbstractLastNetconfOperationTest {
+    class LastNetconfOperationImplTest extends AbstractLastNetconfOperation {
+
+        boolean handleWithNoSubsequentOperationsRun;
+
+        protected LastNetconfOperationImplTest(String netconfSessionIdForReporting) {
+            super(netconfSessionIdForReporting);
+            handleWithNoSubsequentOperationsRun = false;
+        }
+
+        @Override
+        protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+            handleWithNoSubsequentOperationsRun = true;
+            return null;
+        }
+
+        @Override
+        protected String getOperationName() {
+            return "";
+        }
+    }
+
+    LastNetconfOperationImplTest netconfOperation;
+
+    @Before
+    public void setUp() throws Exception {
+        netconfOperation = new LastNetconfOperationImplTest("");
+    }
+
+    @Test
+    public void testNetconfOperation() throws Exception {
+        netconfOperation.handleWithNoSubsequentOperations(null, null);
+        assertTrue(netconfOperation.handleWithNoSubsequentOperationsRun);
+        assertEquals(HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY, netconfOperation.getHandlingPriority());
+    }
+
+    @Test(expected = NetconfDocumentedException.class)
+    public void testHandle() throws Exception {
+        NetconfOperationChainedExecution operation = mock(NetconfOperationChainedExecution.class);
+        doReturn("").when(operation).toString();
+
+        doReturn(false).when(operation).isExecutionTermination();
+        netconfOperation.handle(null, null, operation);
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractNetconfOperationTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractNetconfOperationTest.java
new file mode 100644 (file)
index 0000000..ea4a6e6
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import java.io.IOException;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.xml.sax.SAXException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
+public class AbstractNetconfOperationTest {
+
+    class NetconfOperationImpl extends AbstractNetconfOperation {
+
+        public boolean handleRun;
+
+        protected NetconfOperationImpl(String netconfSessionIdForReporting) {
+            super(netconfSessionIdForReporting);
+            this.handleRun = false;
+        }
+
+        @Override
+        protected String getOperationName() {
+            return null;
+        }
+
+        @Override
+        protected Element handle(Document document, XmlElement message, NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+            this.handleRun = true;
+            try {
+                return XmlUtil.readXmlToElement("<element/>");
+            } catch (SAXException | IOException e) {
+                throw new RuntimeException(e);
+            }
+        }
+    }
+
+    private NetconfOperationImpl netconfOperation;
+    private NetconfOperationChainedExecution operation;
+
+    @Before
+    public void setUp() throws Exception {
+        netconfOperation = new NetconfOperationImpl("str");
+        operation = mock(NetconfOperationChainedExecution.class);
+    }
+
+    @Test
+    public void testAbstractNetconfOperation() throws Exception {
+        Document editConfigMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/edit_config.xml");
+        assertEquals(netconfOperation.getNetconfSessionIdForReporting(), "str");
+        assertNotNull(netconfOperation.canHandle(editConfigMessage));
+        assertEquals(netconfOperation.getHandlingPriority(), HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY);
+
+        netconfOperation.handle(editConfigMessage, operation);
+        assertTrue(netconfOperation.handleRun);
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractSingletonNetconfOperationTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/mapping/AbstractSingletonNetconfOperationTest.java
new file mode 100644 (file)
index 0000000..d1310de
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.mapping;
+
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import static org.junit.Assert.assertEquals;
+
+public class AbstractSingletonNetconfOperationTest {
+    class SingletonNCOperationImpl extends AbstractSingletonNetconfOperation {
+
+        protected SingletonNCOperationImpl(String netconfSessionIdForReporting) {
+            super(netconfSessionIdForReporting);
+        }
+
+        @Override
+        protected Element handleWithNoSubsequentOperations(Document document, XmlElement operationElement) throws NetconfDocumentedException {
+            return null;
+        }
+
+        @Override
+        protected String getOperationName() {
+            return null;
+        }
+    }
+
+    @Test
+    public void testAbstractSingletonNetconfOperation() throws Exception {
+        SingletonNCOperationImpl operation = new SingletonNCOperationImpl("");
+        assertEquals(operation.getHandlingPriority(), HandlingPriority.HANDLE_WITH_MAX_PRIORITY);
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessageAdditionalHeaderTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessageAdditionalHeaderTest.java
new file mode 100644 (file)
index 0000000..95c9124
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class NetconfHelloMessageAdditionalHeaderTest {
+
+
+    private String customHeader = "[user;1.1.1.1:40;tcp;client;]";
+    private NetconfHelloMessageAdditionalHeader header;
+
+    @Before
+    public void setUp() throws Exception {
+        header = new NetconfHelloMessageAdditionalHeader("user", "1.1.1.1", "40", "tcp", "client");
+    }
+
+    @Test
+    public void testGetters() throws Exception {
+        assertEquals(header.getAddress(), "1.1.1.1");
+        assertEquals(header.getUserName(), "user");
+        assertEquals(header.getPort(), "40");
+        assertEquals(header.getTransport(), "tcp");
+        assertEquals(header.getSessionIdentifier(), "client");
+    }
+
+    @Test
+    public void testStaticConstructor() throws Exception {
+        NetconfHelloMessageAdditionalHeader h = NetconfHelloMessageAdditionalHeader.fromString(customHeader);
+        assertEquals(h.toString(), header.toString());
+        assertEquals(h.toFormattedString(), header.toFormattedString());
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessageTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessageTest.java
new file mode 100644 (file)
index 0000000..c39ac8e
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+
+import com.google.common.base.Optional;
+import java.util.Set;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.internal.util.collections.Sets;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class NetconfHelloMessageTest {
+
+    Set<String> caps;
+
+    @Before
+    public void setUp() throws Exception {
+        caps = Sets.newSet("cap1");
+    }
+
+    @Test
+    public void testConstructor() throws Exception {
+        NetconfHelloMessageAdditionalHeader additionalHeader = new NetconfHelloMessageAdditionalHeader("name","host","1","transp","id");
+        NetconfHelloMessage message = NetconfHelloMessage.createClientHello(caps, Optional.of(additionalHeader));
+        assertTrue(message.isHelloMessage(message));
+        assertEquals(Optional.of(additionalHeader), message.getAdditionalHeader());
+
+        NetconfHelloMessage serverMessage = NetconfHelloMessage.createServerHello(caps, 100L);
+        assertTrue(serverMessage.isHelloMessage(serverMessage));
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageHeaderTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageHeaderTest.java
new file mode 100644 (file)
index 0000000..cca89ae
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import com.google.common.base.Charsets;
+import java.util.Arrays;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+
+public class NetconfMessageHeaderTest {
+    @Test
+    public void testGet() throws Exception {
+        NetconfMessageHeader header = new NetconfMessageHeader(10);
+        assertEquals(header.getLength(), 10);
+
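+        // NETCONF 1.1 chunked framing: the header "\n#<length>\n" precedes a chunk of the given length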
+        byte[] expectedValue = "\n#10\n".getBytes(Charsets.US_ASCII);
+        assertArrayEquals(expectedValue, header.toBytes());
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageUtilTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageUtilTest.java
new file mode 100644 (file)
index 0000000..2af34e9
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import java.util.Collection;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.w3c.dom.Document;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class NetconfMessageUtilTest {
+    @Test
+    public void testNetconfMessageUtil() throws Exception {
+        Document okMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc-reply_ok.xml");
+        assertTrue(NetconfMessageUtil.isOKMessage(new NetconfMessage(okMessage)));
+        assertFalse(NetconfMessageUtil.isErrorMessage(new NetconfMessage(okMessage)));
+
+        Document errorMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/communicationError/testClientSendsRpcReply_expectedResponse.xml");
+        assertTrue(NetconfMessageUtil.isErrorMessage(new NetconfMessage(errorMessage)));
+        assertFalse(NetconfMessageUtil.isOKMessage(new NetconfMessage(errorMessage)));
+
+        Document helloMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/client_hello.xml");
+        Collection<String> caps = NetconfMessageUtil.extractCapabilitiesFromHello(new NetconfMessage(helloMessage).getDocument());
+        assertTrue(caps.contains("urn:ietf:params:netconf:base:1.0"));
+        assertTrue(caps.contains("urn:ietf:params:netconf:base:1.1"));
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/SendErrorExceptionUtilTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/messages/SendErrorExceptionUtilTest.java
new file mode 100644 (file)
index 0000000..c8d562c
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.messages;
+
+import io.netty.channel.Channel;
+import io.netty.channel.ChannelFuture;
+import io.netty.util.concurrent.GenericFutureListener;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.api.NetconfSession;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.w3c.dom.Document;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+public class SendErrorExceptionUtilTest {
+
+    NetconfSession netconfSession;
+    ChannelFuture channelFuture;
+    Channel channel;
+    private NetconfDocumentedException exception;
+
+    @Before
+    public void setUp() throws Exception {
+        netconfSession = mock(NetconfSession.class);
+        channelFuture = mock(ChannelFuture.class);
+        channel = mock(Channel.class);
+        doReturn(channelFuture).when(netconfSession).sendMessage(any(NetconfMessage.class));
+        doReturn(channelFuture).when(channelFuture).addListener(any(GenericFutureListener.class));
+        doReturn(channelFuture).when(channel).writeAndFlush(any(NetconfMessage.class));
+        exception = new NetconfDocumentedException("err");
+    }
+
+    @Test
+    public void testSendErrorMessage1() throws Exception {
+        SendErrorExceptionUtil.sendErrorMessage(netconfSession, exception);
+        verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+        verify(netconfSession, times(1)).sendMessage(any(NetconfMessage.class));
+    }
+
+    @Test
+    public void testSendErrorMessage2() throws Exception {
+        SendErrorExceptionUtil.sendErrorMessage(channel, exception);
+        verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+    }
+
+    @Test
+    public void testSendErrorMessage3() throws Exception {
+        Document rpcMessage = XmlFileLoader.xmlFileToDocument("netconfMessages/rpc.xml");
+        SendErrorExceptionUtil.sendErrorMessage(netconfSession, exception, new NetconfMessage(rpcMessage));
+        verify(channelFuture, times(1)).addListener(any(GenericFutureListener.class));
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/osgi/NetconfConfigUtilTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/osgi/NetconfConfigUtilTest.java
new file mode 100644 (file)
index 0000000..741d0d2
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.osgi;
+
+import com.google.common.base.Optional;
+import io.netty.channel.local.LocalAddress;
+import java.net.InetSocketAddress;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.util.NetconfUtil;
+import org.osgi.framework.BundleContext;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+
+public class NetconfConfigUtilTest {
+
+    private BundleContext bundleContext;
+
+    @Before
+    public void setUp() throws Exception {
+        bundleContext = mock(BundleContext.class);
+    }
+
+    @Test
+    public void testNetconfConfigUtil() throws Exception {
+        assertEquals(NetconfConfigUtil.getNetconfLocalAddress(), new LocalAddress("netconf"));
+
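+        // an empty or non-numeric timeout property is expected to fall back to the 5000 ms default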
+        doReturn("").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
+        assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+
+        doReturn("a").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
+        assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+    }
+
+    @Test
+    public void testgetPrivateKeyKey() throws Exception {
+        assertEquals(NetconfConfigUtil.getPrivateKeyKey(), "netconf.ssh.pk.path");
+    }
+
+    @Test
+    public void testgetNetconfServerAddressKey() throws Exception {
+        NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+        assertEquals(NetconfConfigUtil.getNetconfServerAddressKey(prop), "netconf.tcp.address");
+    }
+
+    @Test
+    public void testExtractNetconfServerAddress() throws Exception {
+        NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+        doReturn("").when(bundleContext).getProperty(anyString());
+        assertEquals(NetconfConfigUtil.extractNetconfServerAddress(bundleContext, prop), Optional.absent());
+    }
+
+    @Test
+    public void testExtractNetconfServerAddress2() throws Exception {
+        NetconfConfigUtil.InfixProp prop = NetconfConfigUtil.InfixProp.tcp;
+        doReturn("1.1.1.1").when(bundleContext).getProperty("netconf.tcp.address");
+        doReturn("20").when(bundleContext).getProperty("netconf.tcp.port");
+        Optional<InetSocketAddress> inetSocketAddressOptional = NetconfConfigUtil.extractNetconfServerAddress(bundleContext, prop);
+        assertTrue(inetSocketAddressOptional.isPresent());
+        assertEquals(inetSocketAddressOptional.get(), new InetSocketAddress("1.1.1.1", 20));
+    }
+
+    @Test
+    public void testGetPrivateKeyPath() throws Exception {
+        doReturn("path").when(bundleContext).getProperty("netconf.ssh.pk.path");
+        assertEquals(NetconfConfigUtil.getPrivateKeyPath(bundleContext), "path");
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void testGetPrivateKeyPath2() throws Exception {
+        doReturn(null).when(bundleContext).getProperty("netconf.ssh.pk.path");
+        assertEquals(NetconfConfigUtil.getPrivateKeyPath(bundleContext), "path");
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/HardcodedNamespaceResolverTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/HardcodedNamespaceResolverTest.java
new file mode 100644 (file)
index 0000000..f083cc1
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.xml;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
+import org.junit.Test;
+
+public class HardcodedNamespaceResolverTest {
+
+    @Test
+    public void testResolver() throws Exception {
+        final HardcodedNamespaceResolver hardcodedNamespaceResolver = new HardcodedNamespaceResolver("prefix", "namespace");
+
+        assertEquals("namespace", hardcodedNamespaceResolver.getNamespaceURI("prefix"));
+        try {
+            hardcodedNamespaceResolver.getNamespaceURI("unknown");
+            fail("Unknown namespace lookup should fail");
+        } catch (final IllegalStateException e) {}
+
+        assertNull(hardcodedNamespaceResolver.getPrefix("any"));
+        assertNull(hardcodedNamespaceResolver.getPrefixes("any"));
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/XmlElementTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/XmlElementTest.java
new file mode 100644 (file)
index 0000000..a88de95
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.xml;
+
+import static org.hamcrest.CoreMatchers.both;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.Map;
+import org.junit.Before;
+import org.junit.Test;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import com.google.common.base.Optional;
+
+public class XmlElementTest {
+
+    private final String elementAsString = "<top xmlns=\"namespace\" xmlns:a=\"attrNamespace\" a:attr1=\"value1\" attr2=\"value2\">" +
+            "<inner>" +
+            "<deepInner>deepValue</deepInner>" +
+            "</inner>" +
+            "<innerNamespace xmlns=\"innerNamespace\">innerNamespaceValue</innerNamespace>" +
+            "<innerPrefixed xmlns:b=\"prefixedValueNamespace\">b:valueWithPrefix</innerPrefixed>" +
+            "</top>";
+    private Document document;
+    private Element element;
+    private XmlElement xmlElement;
+
+    @Before
+    public void setUp() throws Exception {
+        document = XmlUtil.readXmlToDocument(elementAsString);
+        element = document.getDocumentElement();
+        xmlElement = XmlElement.fromDomElement(element);
+    }
+
+    @Test
+    public void testConstruct() throws Exception {
+        final XmlElement fromString = XmlElement.fromString(elementAsString);
+        assertEquals(fromString, xmlElement);
+        XmlElement.fromDomDocument(document);
+        XmlElement.fromDomElement(element);
+        XmlElement.fromDomElementWithExpected(element, "top");
+        XmlElement.fromDomElementWithExpected(element, "top", "namespace");
+
+        try {
+            XmlElement.fromString("notXml");
+            fail();
+        } catch (final NetconfDocumentedException e) {}
+
+        try {
+            XmlElement.fromDomElementWithExpected(element, "notTop");
+            fail();
+        } catch (final NetconfDocumentedException e) {}
+
+        try {
+            XmlElement.fromDomElementWithExpected(element, "top", "notNamespace");
+            fail();
+        } catch (final NetconfDocumentedException e) {}
+    }
+
+    @Test
+    public void testGetters() throws Exception {
+        assertEquals(element, xmlElement.getDomElement());
+        assertEquals(element.getElementsByTagName("inner").getLength(), xmlElement.getElementsByTagName("inner").getLength());
+
+        assertEquals("top", xmlElement.getName());
+        assertTrue(xmlElement.hasNamespace());
+        assertEquals("namespace", xmlElement.getNamespace());
+        assertEquals("namespace", xmlElement.getNamespaceAttribute());
+        assertEquals(Optional.of("namespace"), xmlElement.getNamespaceOptionally());
+
+        assertEquals("value1", xmlElement.getAttribute("attr1", "attrNamespace"));
+        assertEquals("value2", xmlElement.getAttribute("attr2"));
+        assertEquals(2 + 2/*Namespace definition*/, xmlElement.getAttributes().size());
+
+        assertEquals(3, xmlElement.getChildElements().size());
+        assertEquals(1, xmlElement.getChildElements("inner").size());
+        assertTrue(xmlElement.getOnlyChildElementOptionally("inner").isPresent());
+        assertTrue(xmlElement.getOnlyChildElementWithSameNamespaceOptionally("inner").isPresent());
+        assertEquals(0, xmlElement.getChildElements("unknown").size());
+        assertFalse(xmlElement.getOnlyChildElementOptionally("unknown").isPresent());
+        assertEquals(1, xmlElement.getChildElementsWithSameNamespace("inner").size());
+        assertEquals(0, xmlElement.getChildElementsWithSameNamespace("innerNamespace").size());
+        assertEquals(1, xmlElement.getChildElementsWithinNamespace("innerNamespace", "innerNamespace").size());
+        assertTrue(xmlElement.getOnlyChildElementOptionally("innerNamespace", "innerNamespace").isPresent());
+        assertFalse(xmlElement.getOnlyChildElementOptionally("innerNamespace", "unknownNamespace").isPresent());
+
+        final XmlElement noNamespaceElement = XmlElement.fromString("<noNamespace/>");
+        assertFalse(noNamespaceElement.hasNamespace());
+        try {
+            noNamespaceElement.getNamespace();
+            fail();
+        } catch (final MissingNameSpaceException e) {}
+
+        final XmlElement inner = xmlElement.getOnlyChildElement("inner");
+        final XmlElement deepInner = inner.getOnlyChildElementWithSameNamespaceOptionally().get();
+        assertEquals(deepInner, inner.getOnlyChildElementWithSameNamespace());
+        assertEquals(Optional.<XmlElement>absent(), xmlElement.getOnlyChildElementOptionally("unknown"));
+        assertEquals("deepValue", deepInner.getTextContent());
+        assertEquals("deepValue", deepInner.getOnlyTextContentOptionally().get());
+        assertEquals("deepValue", deepInner.getOnlyTextContentOptionally().get());
+    }
+
+    @Test
+    public void testExtractNamespaces() throws Exception {
+        final XmlElement innerPrefixed = xmlElement.getOnlyChildElement("innerPrefixed");
+        Map.Entry<String, String> namespaceOfTextContent = innerPrefixed.findNamespaceOfTextContent();
+
+        assertNotNull(namespaceOfTextContent);
+        assertEquals("b", namespaceOfTextContent.getKey());
+        assertEquals("prefixedValueNamespace", namespaceOfTextContent.getValue());
+        final XmlElement innerNamespace = xmlElement.getOnlyChildElement("innerNamespace");
+        namespaceOfTextContent = innerNamespace.findNamespaceOfTextContent();
+
+        assertEquals("", namespaceOfTextContent.getKey());
+        assertEquals("innerNamespace", namespaceOfTextContent.getValue());
+    }
+
+    @Test
+    public void testUnrecognisedElements() throws Exception {
+        xmlElement.checkUnrecognisedElements(xmlElement.getOnlyChildElement("inner"), xmlElement.getOnlyChildElement("innerPrefixed"), xmlElement.getOnlyChildElement("innerNamespace"));
+
+        try {
+            xmlElement.checkUnrecognisedElements(xmlElement.getOnlyChildElement("inner"));
+            fail();
+        } catch (final NetconfDocumentedException e) {
+            assertThat(e.getMessage(), both(containsString("innerPrefixed")).and(containsString("innerNamespace")));
+        }
+    }
+}
diff --git a/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/XmlUtilTest.java b/opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/xml/XmlUtilTest.java
new file mode 100644 (file)
index 0000000..3796dd9
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.util.xml;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import com.google.common.base.Optional;
+import javax.xml.xpath.XPathConstants;
+import javax.xml.xpath.XPathExpression;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Test;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.xml.sax.SAXParseException;
+
+public class XmlUtilTest {
+
+    private final String xml = "<top xmlns=\"namespace\">\n" +
+            "<innerText>value</innerText>\n" +
+            "<innerPrefixedText xmlns:pref=\"prefixNamespace\">prefix:value</innerPrefixedText>\n" +
+            "<innerPrefixedText xmlns=\"randomNamespace\" xmlns:pref=\"prefixNamespace\">prefix:value</innerPrefixedText>\n" +
+            "</top>";
+
+    @Test
+    public void testCreateElement() throws Exception {
+        final Document document = XmlUtil.newDocument();
+        final Element top = XmlUtil.createElement(document, "top", Optional.of("namespace"));
+
+        top.appendChild(XmlUtil.createTextElement(document, "innerText", "value", Optional.of("namespace")));
+        top.appendChild(XmlUtil.createTextElementWithNamespacedContent(document, "innerPrefixedText", "pref", "prefixNamespace", "value", Optional.of("namespace")));
+        top.appendChild(XmlUtil.createTextElementWithNamespacedContent(document, "innerPrefixedText", "pref", "prefixNamespace", "value", Optional.of("randomNamespace")));
+
+        document.appendChild(top);
+        assertEquals("top", XmlUtil.createDocumentCopy(document).getDocumentElement().getTagName());
+
+        XMLUnit.setIgnoreAttributeOrder(true);
+        XMLUnit.setIgnoreWhitespace(true);
+
+        final Diff diff = XMLUnit.compareXML(XMLUnit.buildControlDocument(xml), document);
+        assertTrue(diff.toString(), diff.similar());
+    }
+
+    @Test
+    public void testLoadSchema() throws Exception {
+        XmlUtil.loadSchema();
+        try {
+            XmlUtil.loadSchema(getClass().getResourceAsStream("/netconfMessages/commit.xml"));
+            fail("Input stream does not contain xsd");
+        } catch (final IllegalStateException e) {
+            assertTrue(e.getCause() instanceof SAXParseException);
+        }
+
+    }
+
+    @Test
+    public void testXPath() throws Exception {
+        final XPathExpression correctXPath = XMLNetconfUtil.compileXPath("/top/innerText");
+        try {
+            XMLNetconfUtil.compileXPath("!@(*&$!");
+            fail("Incorrect xpath should fail");
+        } catch (IllegalStateException e) {}
+        final Object value = XmlUtil.evaluateXPath(correctXPath, XmlUtil.readXmlToDocument("<top><innerText>value</innerText></top>"), XPathConstants.NODE);
+        assertEquals("value", ((Element) value).getTextContent());
+    }
+}
\ No newline at end of file
index 12c80fe70cac8d2c105eb4ac70d820a4575eb974..a2df680b07b1d911295d9aba7dfc6697c1491ee7 100644 (file)
@@ -14,8 +14,8 @@ import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
+//import javax.xml.bind.annotation.XmlElementWrapper;
 import java.io.Serializable;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 
@@ -34,7 +34,7 @@ import java.util.List;
  * healthmonitor_id   String
  * admin_state_up     Bool
  * status             String
- * members            List <String>
+ * members            List <NeutronLoadBalancerPoolMember>
  * http://docs.openstack.org/api/openstack-network/2.0/openstack-network.pdf
  */
 
@@ -71,13 +71,10 @@ public class NeutronLoadBalancerPool extends ConfigurationObject implements Seri
     @XmlElement (name="status")
     String loadBalancerPoolStatus;
 
-    @XmlElement (name="members")
-    List loadBalancerPoolMembers;
-
-    HashMap<String, NeutronLoadBalancerPoolMember> member;
+    @XmlElement(name="members")
+    List<NeutronLoadBalancerPoolMember> loadBalancerPoolMembers;
 
     public NeutronLoadBalancerPool() {
-        member = new HashMap<String, NeutronLoadBalancerPoolMember>();
     }
 
     public String getLoadBalancerPoolID() {
@@ -152,14 +149,27 @@ public class NeutronLoadBalancerPool extends ConfigurationObject implements Seri
         this.loadBalancerPoolStatus = loadBalancerPoolStatus;
     }
 
-    public List getLoadBalancerPoolMembers() {
+    public List<NeutronLoadBalancerPoolMember> getLoadBalancerPoolMembers() {
+        /*
+         * Update the pool_id of each member to this.loadBalancerPoolID
+         */
+        for (NeutronLoadBalancerPoolMember member: loadBalancerPoolMembers)
+            member.setPoolID(loadBalancerPoolID);
         return loadBalancerPoolMembers;
     }
 
-    public void setLoadBalancerPoolMembers(List loadBalancerPoolMembers) {
+    public void setLoadBalancerPoolMembers(List<NeutronLoadBalancerPoolMember> loadBalancerPoolMembers) {
         this.loadBalancerPoolMembers = loadBalancerPoolMembers;
     }
 
+    public void addLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember) {
+        this.loadBalancerPoolMembers.add(loadBalancerPoolMember);
+    }
+
+    public void removeLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember) {
+        this.loadBalancerPoolMembers.remove(loadBalancerPoolMember);
+    }
+
     public NeutronLoadBalancerPool extractFields(List<String> fields) {
         NeutronLoadBalancerPool ans = new NeutronLoadBalancerPool();
         Iterator<String> i = fields.iterator();
@@ -198,4 +208,4 @@ public class NeutronLoadBalancerPool extends ConfigurationObject implements Seri
         }
         return ans;
     }
-}
\ No newline at end of file
+}
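A minimal usage sketch of the reworked member handling above (illustrative only, not code from this change; the wrapper class name is invented, only methods visible in the hunks are used, and the member list is supplied explicitly because the no-arg constructor no longer initializes it):

    import java.util.ArrayList;
    import java.util.List;

    import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
    import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;

    public class PoolMemberSketch {
        public static void main(String[] args) {
            NeutronLoadBalancerPool pool = new NeutronLoadBalancerPool();
            // The list must be set explicitly before addLoadBalancerPoolMember() is used.
            pool.setLoadBalancerPoolMembers(new ArrayList<NeutronLoadBalancerPoolMember>());

            NeutronLoadBalancerPoolMember member = new NeutronLoadBalancerPoolMember();
            pool.addLoadBalancerPoolMember(member);

            // getLoadBalancerPoolMembers() stamps the pool's own UUID into each
            // member's pool_id before returning the list.
            List<NeutronLoadBalancerPoolMember> members = pool.getLoadBalancerPoolMembers();
            System.out.println(members.size());
        }
    }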
index 577c3bb5288f76bc7ccc67df7abd5b4bdf9bf585..683d45fcf2e81d373615a8f4b002d6b24c49f23b 100644 (file)
@@ -10,11 +10,18 @@ package org.opendaylight.controller.networkconfig.neutron;
 
 import org.opendaylight.controller.configuration.ConfigurationObject;
 
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlTransient;
 import java.io.Serializable;
 import java.util.Iterator;
 import java.util.List;
 
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
 public class NeutronLoadBalancerPoolMember  extends ConfigurationObject implements Serializable {
 
     private static final long serialVersionUID = 1L;
@@ -46,9 +53,20 @@ public class NeutronLoadBalancerPoolMember  extends ConfigurationObject implemen
     @XmlElement (name="status")
     String poolMemberStatus;
 
+    String poolID;
+
     public NeutronLoadBalancerPoolMember() {
     }
 
+    @XmlTransient
+    public String getPoolID() {
+        return poolID;
+    }
+
+    public void setPoolID(String poolID) {
+        this.poolID = poolID;
+    }
+
     public String getPoolMemberID() {
         return poolMemberID;
     }
@@ -121,6 +139,9 @@ public class NeutronLoadBalancerPoolMember  extends ConfigurationObject implemen
             if (s.equals("id")) {
                 ans.setPoolMemberID(this.getPoolMemberID());
             }
+            if (s.equals("pool_id")) {
+                ans.setPoolID(this.getPoolID());
+            }
             if (s.equals("tenant_id")) {
                 ans.setPoolMemberTenantID(this.getPoolMemberTenantID());
             }
@@ -148,6 +169,7 @@ public class NeutronLoadBalancerPoolMember  extends ConfigurationObject implemen
     @Override public String toString() {
         return "NeutronLoadBalancerPoolMember{" +
                 "poolMemberID='" + poolMemberID + '\'' +
+                ", poolID='" + poolID + '\'' +
                 ", poolMemberTenantID='" + poolMemberTenantID + '\'' +
                 ", poolMemberAddress='" + poolMemberAddress + '\'' +
                 ", poolMemberProtoPort=" + poolMemberProtoPort +
index 748dffc8cfc96c4947e35074c5e58014715dcc94..863b3cbdc7667874630d0be5918e9442f6d632d0 100644 (file)
@@ -38,8 +38,8 @@ import java.util.Iterator;
 import java.util.List;
 
 /**
- * Neutron Northbound REST APIs for LoadBalancer Policies.<br>
- * This class provides REST APIs for managing neutron LoadBalancer Policies
+ * Neutron Northbound REST APIs for LoadBalancers.<br>
+ * This class provides REST APIs for managing neutron LoadBalancers
  *
  * <br>
  * <br>
@@ -87,15 +87,13 @@ public class NeutronLoadBalancerNorthbound {
             @QueryParam("page_reverse") String pageReverse
             // sorting not supported
     ) {
-        INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
-                this);
-        //        INeutronLoadBalancerRuleCRUD firewallRuleInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerRuleCRUD(this);
+        INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(this);
 
-        if (loadBalancerPoolInterface == null) {
+        if (loadBalancerInterface == null) {
             throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
                     + RestMessages.SERVICEUNAVAILABLE.toString());
         }
-        List<NeutronLoadBalancer> allLoadBalancers = loadBalancerPoolInterface.getAllNeutronLoadBalancers();
+        List<NeutronLoadBalancer> allLoadBalancers = loadBalancerInterface.getAllNeutronLoadBalancers();
         //        List<NeutronLoadBalancerRule> allLoadBalancerRules = firewallRuleInterface.getAllNeutronLoadBalancerRules();
         List<NeutronLoadBalancer> ans = new ArrayList<NeutronLoadBalancer>();
         //        List<NeutronLoadBalancerRule> rules = new ArrayList<NeutronLoadBalancerRule>();
@@ -128,7 +126,7 @@ public class NeutronLoadBalancerNorthbound {
     /**
      * Returns a specific LoadBalancer */
 
-    @Path("{loadBalancerPoolID}")
+    @Path("{loadBalancerID}")
     @GET
     @Produces({ MediaType.APPLICATION_JSON })
 
@@ -137,25 +135,25 @@ public class NeutronLoadBalancerNorthbound {
             @ResponseCode(code = 401, condition = "Unauthorized"),
             @ResponseCode(code = 404, condition = "Not Found"),
             @ResponseCode(code = 501, condition = "Not Implemented") })
-    public Response showLoadBalancer(@PathParam("loadBalancerPoolID") String loadBalancerPoolID,
+    public Response showLoadBalancer(@PathParam("loadBalancerID") String loadBalancerID,
             // return fields
             @QueryParam("fields") List<String> fields) {
-        INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+        INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
                 this);
-        if (loadBalancerPoolInterface == null) {
+        if (loadBalancerInterface == null) {
             throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
                     + RestMessages.SERVICEUNAVAILABLE.toString());
         }
-        if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+        if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) {
             throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
         }
         if (fields.size() > 0) {
-            NeutronLoadBalancer ans = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+            NeutronLoadBalancer ans = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID);
             return Response.status(200).entity(
                     new NeutronLoadBalancerRequest(extractFields(ans, fields))).build();
         } else {
-            return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer(
-                    loadBalancerPoolID))).build();
+            return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerInterface.getNeutronLoadBalancer(
+                    loadBalancerID))).build();
         }
     }
 
@@ -175,9 +173,9 @@ public class NeutronLoadBalancerNorthbound {
             @ResponseCode(code = 409, condition = "Conflict"),
             @ResponseCode(code = 501, condition = "Not Implemented") })
     public Response createLoadBalancers(final NeutronLoadBalancerRequest input) {
-        INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+        INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
                 this);
-        if (loadBalancerPoolInterface == null) {
+        if (loadBalancerInterface == null) {
             throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
                     + RestMessages.SERVICEUNAVAILABLE.toString());
         }
@@ -187,11 +185,9 @@ public class NeutronLoadBalancerNorthbound {
             /*
              *  Verify that the LoadBalancer doesn't already exist.
              */
-            if (loadBalancerPoolInterface.neutronLoadBalancerExists(singleton.getLoadBalancerID())) {
+            if (loadBalancerInterface.neutronLoadBalancerExists(singleton.getLoadBalancerID())) {
                 throw new BadRequestException("LoadBalancer UUID already exists");
             }
-            loadBalancerPoolInterface.addNeutronLoadBalancer(singleton);
-
             Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
             if (instances != null) {
                 for (Object instance : instances) {
@@ -202,7 +198,7 @@ public class NeutronLoadBalancerNorthbound {
                     }
                 }
             }
-            loadBalancerPoolInterface.addNeutronLoadBalancer(singleton);
+            loadBalancerInterface.addNeutronLoadBalancer(singleton);
             if (instances != null) {
                 for (Object instance : instances) {
                     INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
@@ -218,10 +214,10 @@ public class NeutronLoadBalancerNorthbound {
                 NeutronLoadBalancer test = i.next();
 
                 /*
-                 *  Verify that the firewall policy doesn't already exist
+                 *  Verify that the loadbalancer doesn't already exist
                  */
 
-                if (loadBalancerPoolInterface.neutronLoadBalancerExists(test.getLoadBalancerID())) {
+                if (loadBalancerInterface.neutronLoadBalancerExists(test.getLoadBalancerID())) {
                     throw new BadRequestException("Load Balancer Pool UUID already is already created");
                 }
                 if (testMap.containsKey(test.getLoadBalancerID())) {
@@ -243,7 +239,7 @@ public class NeutronLoadBalancerNorthbound {
             i = bulk.iterator();
             while (i.hasNext()) {
                 NeutronLoadBalancer test = i.next();
-                loadBalancerPoolInterface.addNeutronLoadBalancer(test);
+                loadBalancerInterface.addNeutronLoadBalancer(test);
                 if (instances != null) {
                     for (Object instance : instances) {
                         INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
@@ -258,7 +254,7 @@ public class NeutronLoadBalancerNorthbound {
     /**
      * Updates a LoadBalancer Policy
      */
-    @Path("{loadBalancerPoolID}")
+    @Path("{loadBalancerID}")
     @PUT
     @Produces({ MediaType.APPLICATION_JSON })
     @Consumes({ MediaType.APPLICATION_JSON })
@@ -271,10 +267,10 @@ public class NeutronLoadBalancerNorthbound {
             @ResponseCode(code = 404, condition = "Not Found"),
             @ResponseCode(code = 501, condition = "Not Implemented") })
     public Response updateLoadBalancer(
-            @PathParam("loadBalancerPoolID") String loadBalancerPoolID, final NeutronLoadBalancerRequest input) {
-        INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+            @PathParam("loadBalancerID") String loadBalancerID, final NeutronLoadBalancerRequest input) {
+        INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
                 this);
-        if (loadBalancerPoolInterface == null) {
+        if (loadBalancerInterface == null) {
             throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
                     + RestMessages.SERVICEUNAVAILABLE.toString());
         }
@@ -282,14 +278,14 @@ public class NeutronLoadBalancerNorthbound {
         /*
          * verify the LoadBalancer exists and there is only one delta provided
          */
-        if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+        if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) {
             throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
         }
         if (!input.isSingleton()) {
             throw new BadRequestException("Only singleton edit supported");
         }
         NeutronLoadBalancer delta = input.getSingleton();
-        NeutronLoadBalancer original = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+        NeutronLoadBalancer original = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID);
 
         /*
          * updates restricted by Neutron
@@ -318,23 +314,23 @@ public class NeutronLoadBalancerNorthbound {
         /*
          * update the object and return it
          */
-        loadBalancerPoolInterface.updateNeutronLoadBalancer(loadBalancerPoolID, delta);
-        NeutronLoadBalancer updatedLoadBalancer = loadBalancerPoolInterface.getNeutronLoadBalancer(
-                loadBalancerPoolID);
+        loadBalancerInterface.updateNeutronLoadBalancer(loadBalancerID, delta);
+        NeutronLoadBalancer updatedLoadBalancer = loadBalancerInterface.getNeutronLoadBalancer(
+                loadBalancerID);
         if (instances != null) {
             for (Object instance : instances) {
                 INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
                 service.neutronLoadBalancerUpdated(updatedLoadBalancer);
             }
         }
-        return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer(
-                loadBalancerPoolID))).build();
+        return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerInterface.getNeutronLoadBalancer(
+                loadBalancerID))).build();
     }
 
     /**
      * Deletes a LoadBalancer */
 
-    @Path("{loadBalancerPoolID}")
+    @Path("{loadBalancerID}")
     @DELETE
     @StatusCodes({
             @ResponseCode(code = 204, condition = "No Content"),
@@ -343,10 +339,10 @@ public class NeutronLoadBalancerNorthbound {
             @ResponseCode(code = 409, condition = "Conflict"),
             @ResponseCode(code = 501, condition = "Not Implemented") })
     public Response deleteLoadBalancer(
-            @PathParam("loadBalancerPoolID") String loadBalancerPoolID) {
-        INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+            @PathParam("loadBalancerID") String loadBalancerID) {
+        INeutronLoadBalancerCRUD loadBalancerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
                 this);
-        if (loadBalancerPoolInterface == null) {
+        if (loadBalancerInterface == null) {
             throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
                     + RestMessages.SERVICEUNAVAILABLE.toString());
         }
@@ -354,13 +350,13 @@ public class NeutronLoadBalancerNorthbound {
         /*
          * verify the LoadBalancer exists and it isn't currently in use
          */
-        if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+        if (!loadBalancerInterface.neutronLoadBalancerExists(loadBalancerID)) {
             throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
         }
-        if (loadBalancerPoolInterface.neutronLoadBalancerInUse(loadBalancerPoolID)) {
+        if (loadBalancerInterface.neutronLoadBalancerInUse(loadBalancerID)) {
             return Response.status(409).build();
         }
-        NeutronLoadBalancer singleton = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+        NeutronLoadBalancer singleton = loadBalancerInterface.getNeutronLoadBalancer(loadBalancerID);
         Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
         if (instances != null) {
             for (Object instance : instances) {
@@ -372,7 +368,7 @@ public class NeutronLoadBalancerNorthbound {
             }
         }
 
-        loadBalancerPoolInterface.removeNeutronLoadBalancer(loadBalancerPoolID);
+        loadBalancerInterface.removeNeutronLoadBalancer(loadBalancerID);
         if (instances != null) {
             for (Object instance : instances) {
                 INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
@@ -12,7 +12,7 @@ import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool
 import javax.xml.bind.annotation.XmlElement;
 import java.util.List;
 
-public class INeutronLoadBalancerPoolMemberRequest {
+public class NeutronLoadBalancerPoolMemberRequest {
 
     /**
      * See OpenStack Network API v2.0 Reference for description of
@@ -25,15 +25,15 @@ public class INeutronLoadBalancerPoolMemberRequest {
     @XmlElement(name="members")
     List<NeutronLoadBalancerPoolMember> bulkRequest;
 
-    INeutronLoadBalancerPoolMemberRequest() {
+    NeutronLoadBalancerPoolMemberRequest() {
     }
 
-    INeutronLoadBalancerPoolMemberRequest(List<NeutronLoadBalancerPoolMember> bulk) {
+    NeutronLoadBalancerPoolMemberRequest(List<NeutronLoadBalancerPoolMember> bulk) {
         bulkRequest = bulk;
         singletonLoadBalancerPoolMember = null;
     }
 
-    INeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) {
+    NeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) {
         singletonLoadBalancerPoolMember = group;
     }
 
index ff56fa0a9d3786eaf17cc1848d7212c8054dab58..f8f3cd8c5311f685699257c6a665ef44f6ad5d5a 100644 (file)
@@ -1,46 +1,51 @@
 /*
- * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2014 SDN Hub, LLC.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Authors : Srini Seetharaman
  */
 
 package org.opendaylight.controller.networkconfig.neutron.northbound;
 
 import org.codehaus.enunciate.jaxrs.ResponseCode;
 import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
 import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberAware;
-import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
 import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
 import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
 import org.opendaylight.controller.northbound.commons.RestMessages;
 import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
 import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
 import org.opendaylight.controller.sal.utils.ServiceHelper;
 
 import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
 import javax.ws.rs.PUT;
 import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
 import javax.ws.rs.Produces;
 import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 
-
-@Path("/pools/{loadBalancerPoolID}/members")
+@Path("/pools/{loadBalancerPoolUUID}/members")
 public class NeutronLoadBalancerPoolMembersNorthbound {
-
     private NeutronLoadBalancerPoolMember extractFields(NeutronLoadBalancerPoolMember o, List<String> fields) {
         return o.extractFields(fields);
     }
 /**
- * Returns a list of all LoadBalancerPool
+ * Returns a list of all LoadBalancerPoolMembers in the specified pool
  */
 @GET
 @Produces({MediaType.APPLICATION_JSON})
@@ -50,8 +55,12 @@ public class NeutronLoadBalancerPoolMembersNorthbound {
         @ResponseCode(code = 501, condition = "Not Implemented")})
 
 public Response listMembers(
+        //Path param
+        @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+
         // return fields
         @QueryParam("fields") List<String> fields,
+
         // OpenStack LoadBalancerPool attributes
         @QueryParam("id") String queryLoadBalancerPoolMemberID,
         @QueryParam("tenant_id") String queryLoadBalancerPoolMemberTenantID,
@@ -68,20 +77,24 @@ public Response listMembers(
         @QueryParam("page_reverse") String pageReverse
         // sorting not supported
 ) {
-    INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces
-            .getINeutronLoadBalancerPoolMemberCRUD(this);
-    if (loadBalancerPoolMemberInterface == null) {
+    INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces
+            .getINeutronLoadBalancerPoolCRUD(this);
+    if (loadBalancerPoolInterface == null) {
         throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
                 + RestMessages.SERVICEUNAVAILABLE.toString());
     }
-    List<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = loadBalancerPoolMemberInterface
-            .getAllNeutronLoadBalancerPoolMembers();
+    if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+        throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+    }
+    List<NeutronLoadBalancerPoolMember> members =
+                loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers();
     List<NeutronLoadBalancerPoolMember> ans = new ArrayList<NeutronLoadBalancerPoolMember>();
-    Iterator<NeutronLoadBalancerPoolMember> i = allLoadBalancerPoolMembers.iterator();
+    Iterator<NeutronLoadBalancerPoolMember> i = members.iterator();
     while (i.hasNext()) {
         NeutronLoadBalancerPoolMember nsg = i.next();
         if ((queryLoadBalancerPoolMemberID == null ||
                 queryLoadBalancerPoolMemberID.equals(nsg.getPoolMemberID())) &&
+                loadBalancerPoolUUID.equals(nsg.getPoolID()) &&
                 (queryLoadBalancerPoolMemberTenantID == null ||
                         queryLoadBalancerPoolMemberTenantID.equals(nsg.getPoolMemberTenantID())) &&
                 (queryLoadBalancerPoolMemberAddress == null ||
@@ -102,13 +115,57 @@ public Response listMembers(
         }
     }
     return Response.status(200).entity(
-            new INeutronLoadBalancerPoolMemberRequest(ans)).build();
+            new NeutronLoadBalancerPoolMemberRequest(ans)).build();
+}
+
+/**
+ * Returns a specific LoadBalancerPoolMember
+ */
+
+@Path("{loadBalancerPoolMemberUUID}")
+@GET
+@Produces({ MediaType.APPLICATION_JSON })
+//@TypeHint(OpenStackLoadBalancerPoolMembers.class)
+@StatusCodes({
+    @ResponseCode(code = 200, condition = "Operation successful"),
+    @ResponseCode(code = 401, condition = "Unauthorized"),
+    @ResponseCode(code = 404, condition = "Not Found"),
+    @ResponseCode(code = 501, condition = "Not Implemented") })
+public Response showLoadBalancerPoolMember(
+        @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+        @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID,
+        // return fields
+        @QueryParam("fields") List<String> fields ) {
+
+    INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces
+            .getINeutronLoadBalancerPoolCRUD(this);
+    if (loadBalancerPoolInterface == null) {
+        throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+                + RestMessages.SERVICEUNAVAILABLE.toString());
+    }
+    if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+        throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+    }
+    List<NeutronLoadBalancerPoolMember> members =
+                loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers();
+    for (NeutronLoadBalancerPoolMember ans: members) {
+        if (!ans.getPoolMemberID().equals(loadBalancerPoolMemberUUID))
+            continue;
+
+        if (fields.size() > 0) {
+            return Response.status(200).entity(
+                new NeutronLoadBalancerPoolMemberRequest(extractFields(ans, fields))).build();
+        } else {
+            return Response.status(200).entity(
+                new NeutronLoadBalancerPoolMemberRequest(ans)).build();
+        }
+    }
+    return Response.status(204).build();
 }
 
 /**
  * Adds a Member to an LBaaS Pool member
  */
-@Path("/pools/{loadBalancerPoolID}/members")
 @PUT
 @Produces({MediaType.APPLICATION_JSON})
 @Consumes({MediaType.APPLICATION_JSON})
@@ -117,25 +174,34 @@ public Response listMembers(
         @ResponseCode(code = 401, condition = "Unauthorized"),
         @ResponseCode(code = 404, condition = "Not Found"),
         @ResponseCode(code = 501, condition = "Not Implemented")})
-public Response createLoadBalancerPoolMember(  INeutronLoadBalancerPoolMemberRequest input) {
+public Response createLoadBalancerPoolMember(
+        @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+        final NeutronLoadBalancerPoolMemberRequest input) {
 
-    INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD(
-            this);
-    if (loadBalancerPoolMemberInterface == null) {
-        throw new ServiceUnavailableException("LoadBalancerPoolMember CRUD Interface "
+    INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+    if (loadBalancerPoolInterface == null) {
+        throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
                 + RestMessages.SERVICEUNAVAILABLE.toString());
     }
+    // Verify that the loadBalancerPool exists, for the member to be added to its cache
+    if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+        throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+    }
+    NeutronLoadBalancerPool singletonPool = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID);
+
     if (input.isSingleton()) {
         NeutronLoadBalancerPoolMember singleton = input.getSingleton();
+        singleton.setPoolID(loadBalancerPoolUUID);
+        String loadBalancerPoolMemberUUID = singleton.getPoolMemberID();
 
         /*
          *  Verify that the LoadBalancerPoolMember doesn't already exist.
          */
-        if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists(
-                singleton.getPoolMemberID())) {
-            throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
+        List<NeutronLoadBalancerPoolMember> members = singletonPool.getLoadBalancerPoolMembers();
+        for (NeutronLoadBalancerPoolMember member: members) {
+            if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID))
+                throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
         }
-        loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(singleton);
 
         Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
         if (instances != null) {
@@ -147,13 +213,18 @@ public Response createLoadBalancerPoolMember(  INeutronLoadBalancerPoolMemberReq
                 }
             }
         }
-        loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(singleton);
         if (instances != null) {
             for (Object instance : instances) {
                 INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
                 service.neutronLoadBalancerPoolMemberCreated(singleton);
             }
         }
+
+        /*
+         * Add the member to the neutron load balancer pool as well
+         */
+        singletonPool.addLoadBalancerPoolMember(singleton);
+
     } else {
         List<NeutronLoadBalancerPoolMember> bulk = input.getBulk();
         Iterator<NeutronLoadBalancerPoolMember> i = bulk.iterator();
@@ -161,15 +232,17 @@ public Response createLoadBalancerPoolMember(  INeutronLoadBalancerPoolMemberReq
         Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
         while (i.hasNext()) {
             NeutronLoadBalancerPoolMember test = i.next();
+            String loadBalancerPoolMemberUUID = test.getPoolMemberID();
 
             /*
-             *  Verify that the firewall doesn't already exist
+             *  Verify that the LoadBalancerPoolMember doesn't already exist.
              */
-
-            if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists(
-                    test.getPoolMemberID())) {
-                throw new BadRequestException("Load Balancer PoolMember UUID already is already created");
+            List<NeutronLoadBalancerPoolMember> members = singletonPool.getLoadBalancerPoolMembers();
+            for (NeutronLoadBalancerPoolMember member: members) {
+                if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID))
+                    throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
             }
+
             if (testMap.containsKey(test.getPoolMemberID())) {
                 throw new BadRequestException("Load Balancer PoolMember UUID already exists");
             }
@@ -189,15 +262,105 @@ public Response createLoadBalancerPoolMember(  INeutronLoadBalancerPoolMemberReq
         i = bulk.iterator();
         while (i.hasNext()) {
             NeutronLoadBalancerPoolMember test = i.next();
-            loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(test);
             if (instances != null) {
                 for (Object instance : instances) {
                     INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
                     service.neutronLoadBalancerPoolMemberCreated(test);
                 }
             }
+            singletonPool.addLoadBalancerPoolMember(test);
         }
     }
     return Response.status(201).entity(input).build();
 }
+
+/**
+ * Updates a LoadBalancerPoolMember
+ */
+
+@Path("{loadBalancerPoolMemberUUID}")
+@PUT
+@Produces({ MediaType.APPLICATION_JSON })
+@Consumes({ MediaType.APPLICATION_JSON })
+@StatusCodes({
+        @ResponseCode(code = 200, condition = "Operation successful"),
+        @ResponseCode(code = 400, condition = "Bad Request"),
+        @ResponseCode(code = 401, condition = "Unauthorized"),
+        @ResponseCode(code = 403, condition = "Forbidden"),
+        @ResponseCode(code = 404, condition = "Not Found"),
+        @ResponseCode(code = 501, condition = "Not Implemented") })
+public Response updateLoadBalancerPoolMember(
+        @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+        @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID,
+        final NeutronLoadBalancerPoolMemberRequest input) {
+
+    //TODO: Implement update of the LoadBalancerPoolMember
+    return Response.status(501).entity(input).build();
+}
+
+/**
+ * Deletes a LoadBalancerPoolMember
+ */
+
+@Path("{loadBalancerPoolMemberUUID}")
+@DELETE
+@StatusCodes({
+    @ResponseCode(code = 204, condition = "No Content"),
+    @ResponseCode(code = 401, condition = "Unauthorized"),
+    @ResponseCode(code = 403, condition = "Forbidden"),
+    @ResponseCode(code = 404, condition = "Not Found"),
+    @ResponseCode(code = 501, condition = "Not Implemented") })
+public Response deleteLoadBalancerPoolMember(
+        @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID,
+        @PathParam("loadBalancerPoolMemberUUID") String loadBalancerPoolMemberUUID) {
+    INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+    if (loadBalancerPoolInterface == null) {
+        throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+                + RestMessages.SERVICEUNAVAILABLE.toString());
+    }
+
+    // Verify that the loadBalancerPool exists, for the member to be removed from its cache
+    if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+        throw new ResourceNotFoundException("loadBalancerPool UUID does not exist.");
+    }
+
+    //Verify that the LB pool member exists
+    NeutronLoadBalancerPoolMember singleton = null;
+    List<NeutronLoadBalancerPoolMember> members =
+            loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID).getLoadBalancerPoolMembers();
+    for (NeutronLoadBalancerPoolMember member: members) {
+        if (member.getPoolMemberID().equals(loadBalancerPoolMemberUUID)) {
+            singleton = member;
+            break;
+        }
+    }
+    if (singleton == null)
+        throw new ResourceNotFoundException("LoadBalancerPoolMember UUID does not exist.");
+
+    Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
+    if (instances != null) {
+        for (Object instance : instances) {
+            INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+            int status = service.canDeleteNeutronLoadBalancerPoolMember(singleton);
+            if (status < 200 || status > 299) {
+                return Response.status(status).build();
+            }
+        }
+    }
+
+    if (instances != null) {
+        for (Object instance : instances) {
+            INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+            service.neutronLoadBalancerPoolMemberDeleted(singleton);
+        }
+    }
+
+    /*
+     * Remove the member from the neutron load balancer pool
+     */
+    NeutronLoadBalancerPool singletonPool = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID);
+    singletonPool.removeLoadBalancerPoolMember(singleton);
+
+    return Response.status(204).build();
+}
 }
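Combined with the class-level @Path("/pools/{loadBalancerPoolUUID}/members"), the handlers above expose the following member sub-resources (paths only, as declared by the annotations; any base URL prefix is deployment-specific and not shown in this patch):

    GET    /pools/{loadBalancerPoolUUID}/members                               -> listMembers
    GET    /pools/{loadBalancerPoolUUID}/members/{loadBalancerPoolMemberUUID}  -> showLoadBalancerPoolMember
    PUT    /pools/{loadBalancerPoolUUID}/members                               -> createLoadBalancerPoolMember
    PUT    /pools/{loadBalancerPoolUUID}/members/{loadBalancerPoolMemberUUID}  -> updateLoadBalancerPoolMember (501 until implemented)
    DELETE /pools/{loadBalancerPoolUUID}/members/{loadBalancerPoolMemberUUID}  -> deleteLoadBalancerPoolMember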
index fc5357ccb5f503680b5e5c5c4220118b8a86e1d0..7802dbb906287e9e18becc916aaa4ba0bb6dd5e3 100644 (file)
@@ -1,9 +1,11 @@
 /*
- * Copyright (C) 2014 Red Hat, Inc.
+ * Copyright (C) 2014 SDN Hub, LLC.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ * Authors : Srini Seetharaman
  */
 
 package org.opendaylight.controller.networkconfig.neutron.northbound;
@@ -13,8 +15,10 @@ import org.codehaus.enunciate.jaxrs.ResponseCode;
 import org.codehaus.enunciate.jaxrs.StatusCodes;
 import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolAware;
 import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
 import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
 import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
 import org.opendaylight.controller.northbound.commons.RestMessages;
 import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
 import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
@@ -22,6 +26,7 @@ import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailab
 import org.opendaylight.controller.sal.utils.ServiceHelper;
 
 import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
 import javax.ws.rs.GET;
 import javax.ws.rs.POST;
 import javax.ws.rs.PUT;
@@ -31,6 +36,7 @@ import javax.ws.rs.Produces;
 import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
+
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -53,6 +59,13 @@ import java.util.List;
  * http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
  *
  */
+
+/**
+ * For now, the LB pool member data is maintained with the INeutronLoadBalancerPoolCRUD,
+ * although there may be an overlap with INeutronLoadBalancerPoolMemberCRUD's cache.
+ * TODO: Consolidate and maintain a single copy
+ */
+
 @Path("/pools")
 public class NeutronLoadBalancerPoolNorthbound {
 
@@ -83,7 +96,7 @@ public class NeutronLoadBalancerPoolNorthbound {
             @QueryParam("healthmonitor_id") String queryLoadBalancerPoolHealthMonitorID,
             @QueryParam("admin_state_up") String queryLoadBalancerIsAdminStateUp,
             @QueryParam("status") String queryLoadBalancerPoolStatus,
-            @QueryParam("members") List queryLoadBalancerPoolMembers,
+            @QueryParam("members") List<NeutronLoadBalancerPoolMember> queryLoadBalancerPoolMembers,
             // pagination
             @QueryParam("limit") String limit,
             @QueryParam("marker") String marker,
@@ -217,7 +230,7 @@ public class NeutronLoadBalancerPoolNorthbound {
                 NeutronLoadBalancerPool test = i.next();
 
                 /*
-                 *  Verify that the firewall doesn't already exist
+                 *  Verify that the loadBalancerPool doesn't already exist
                  */
 
                 if (loadBalancerPoolInterface.neutronLoadBalancerPoolExists(test.getLoadBalancerPoolID())) {
@@ -328,4 +341,73 @@ public class NeutronLoadBalancerPoolNorthbound {
         }
         return Response.status(200).entity(new NeutronLoadBalancerPoolRequest(loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID))).build();
     }
+
+    /**
+     * Deletes a LoadBalancerPool
+     */
+
+    @Path("{loadBalancerPoolUUID}")
+    @DELETE
+    @StatusCodes({
+            @ResponseCode(code = 204, condition = "No Content"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 409, condition = "Conflict"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response deleteLoadBalancerPool(
+            @PathParam("loadBalancerPoolUUID") String loadBalancerPoolUUID) {
+        INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+        if (loadBalancerPoolInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+
+        /*
+         * verify the LoadBalancerPool exists and it isn't currently in use
+         */
+        if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolUUID)) {
+            throw new ResourceNotFoundException("LoadBalancerPool UUID does not exist.");
+        }
+        if (loadBalancerPoolInterface.neutronLoadBalancerPoolInUse(loadBalancerPoolUUID)) {
+            return Response.status(409).build();
+        }
+        NeutronLoadBalancerPool singleton = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolUUID);
+        Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolAware.class, this, null);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+                int status = service.canDeleteNeutronLoadBalancerPool(singleton);
+                if (status < 200 || status > 299) {
+                    return Response.status(status).build();
+                }
+            }
+        }
+
+        /*
+         * remove it and return 204 status
+         */
+        loadBalancerPoolInterface.removeNeutronLoadBalancerPool(loadBalancerPoolUUID);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+                service.neutronLoadBalancerPoolDeleted(singleton);
+            }
+        }
+
+        /*
+         * remove corresponding members from the member cache too
+         */
+        INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD(this);
+        if (loadBalancerPoolMemberInterface != null) {
+            List<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = new
+                ArrayList<NeutronLoadBalancerPoolMember>(loadBalancerPoolMemberInterface.getAllNeutronLoadBalancerPoolMembers());
+            Iterator<NeutronLoadBalancerPoolMember> i = allLoadBalancerPoolMembers.iterator();
+            while (i.hasNext()) {
+                NeutronLoadBalancerPoolMember member = i.next();
+                if (loadBalancerPoolUUID.equals(member.getPoolID()))
+                    loadBalancerPoolMemberInterface.removeNeutronLoadBalancerPoolMember(member.getPoolMemberID());
+            }
+        }
+        return Response.status(204).build();
+    }
 }
index 35ae71d0019ed2b0b1d4893c7e47901be6e906ea..987394402d7157f44a011c3d16ab77308855d8a6 100644 (file)
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others.  All rights reserved.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
@@ -190,8 +190,9 @@ public class ICMP extends Packet {
             end += rawPayload.length;
         }
         int checksumStartByte = start + getfieldOffset(CHECKSUM) / NetUtils.NumBitsInAByte;
+        int even = end & ~1;
 
-        for (int i = start; i <= (end - 1); i = i + 2) {
+        for (int i = start; i < even; i = i + 2) {
             // Skip, if the current bytes are checkSum bytes
             if (i == checksumStartByte) {
                 continue;
@@ -199,7 +200,13 @@ public class ICMP extends Packet {
             wordData = ((data[i] << 8) & 0xFF00) + (data[i + 1] & 0xFF);
             sum = sum + wordData;
         }
-        carry = (sum >> 16) & 0xFF;
+        if (even < end) {
+            // Add the last octet with zero padding.
+            wordData = (data[even] << 8) & 0xFF00;
+            sum = sum + wordData;
+        }
+
+        carry = sum >>> 16;
         finalSum = (sum & 0xFFFF) + carry;
         return (short) ~((short) finalSum & 0xFFFF);
     }
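The checksum change above is easier to follow in isolation. A standalone sketch of the same technique (an RFC 1071-style one's-complement sum; illustrative only, not the controller's ICMP class, and it does not skip the checksum field's own bytes the way the real method does):

    public final class ChecksumSketch {
        public static short checksum(final byte[] data) {
            int sum = 0;
            final int even = data.length & ~1;       // length rounded down to a multiple of two
            for (int i = 0; i < even; i += 2) {
                sum += ((data[i] << 8) & 0xFF00) | (data[i + 1] & 0xFF);
            }
            if (even < data.length) {
                sum += (data[even] << 8) & 0xFF00;   // trailing odd byte, zero-padded on the right
            }
            while ((sum >>> 16) != 0) {              // fold carries out of the low 16 bits
                sum = (sum & 0xFFFF) + (sum >>> 16);
            }
            return (short) ~sum;
        }
    }

Using the unsigned shift (sum >>> 16) rather than (sum >> 16) & 0xFF is what keeps the fold correct once the carry exceeds 255, which is exactly the case the large-payload tests below exercise.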
index 3363f423d695f1b2f090e6c5274715f47d765e3c..56793c41f6ef624efedd743b9682bb13bede8018 100644 (file)
@@ -260,7 +260,17 @@ public class IPv4 extends Packet {
      */
     public void setHeaderField(String headerField, byte[] readValue) {
         if (headerField.equals(PROTOCOL)) {
-            payloadClass = protocolClassMap.get(readValue[0]);
+            // Don't set payloadClass if the fragment offset is not zero.
+            byte[] fragoff = hdrFieldsMap.get(FRAGOFFSET);
+            if (fragoff == null || BitBufferHelper.getShort(fragoff) == 0) {
+                payloadClass = protocolClassMap.get(readValue[0]);
+            }
+        } else if (headerField.equals(FRAGOFFSET)) {
+            if (readValue != null && BitBufferHelper.getShort(readValue) != 0) {
+                // Clear payloadClass because protocol header is not present
+                // in this packet.
+                payloadClass = null;
+            }
         } else if (headerField.equals(OPTIONS) &&
                    (readValue == null || readValue.length == 0)) {
             hdrFieldsMap.remove(headerField);
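The rule this hunk enforces: only the first IPv4 fragment (fragment offset zero) carries the transport-layer header, so a protocol-specific payload class should be resolved only for that fragment. A hypothetical sketch of the decision, separate from the controller's classes (the map and names here are invented):

    import java.util.HashMap;
    import java.util.Map;

    public final class FragmentDemuxSketch {
        private static final Map<Byte, Class<?>> PROTOCOL_CLASS_MAP = new HashMap<Byte, Class<?>>();

        // Returns null for non-first fragments, mirroring the cleared payloadClass above.
        public static Class<?> payloadClassFor(final byte protocol, final short fragmentOffset) {
            return fragmentOffset == 0 ? PROTOCOL_CLASS_MAP.get(protocol) : null;
        }
    }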
index e81fbf02cfafc4550ebfb5e5558d144e55d0696d..287b73ae3c22fda416fe21a8f003bc9d6e451312 100644 (file)
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others.  All rights reserved.
  *
  * This program and the accompanying materials are made available under the
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
@@ -9,6 +9,8 @@
 
 package org.opendaylight.controller.sal.packet;
 
+import java.util.Arrays;
+
 import junit.framework.Assert;
 
 import org.junit.Test;
@@ -74,28 +76,58 @@ public class ICMPTest {
                 (byte) 0x2b, (byte) 0x2c, (byte) 0x2d, (byte) 0x2e,
                 (byte) 0x2f, (byte) 0x30, (byte) 0x31, (byte) 0x32,
                 (byte) 0x33, (byte) 0x34, (byte) 0x35, (byte) 0x36, (byte) 0x37 };
+        serializeTest(icmpRawPayload, (short)0xe553);
+
+        serializeTest(null, (short)0xb108);
+        serializeTest(new byte[0], (short)0xb108);
+
+        byte[] odd = {
+            (byte)0xba, (byte)0xd4, (byte)0xc7, (byte)0x53,
+            (byte)0xf8, (byte)0x59, (byte)0x68, (byte)0x77,
+            (byte)0xfd, (byte)0x27, (byte)0xe0, (byte)0x5b,
+            (byte)0xd0, (byte)0x2e, (byte)0x28, (byte)0x41,
+            (byte)0xa3, (byte)0x48, (byte)0x5d, (byte)0x2e,
+            (byte)0x7d, (byte)0x5b, (byte)0xd3, (byte)0x60,
+            (byte)0xb3, (byte)0x88, (byte)0x8d, (byte)0x0f,
+            (byte)0x1d, (byte)0x87, (byte)0x51, (byte)0x0f,
+            (byte)0x6a, (byte)0xff, (byte)0xf7, (byte)0xd4,
+            (byte)0x40, (byte)0x35, (byte)0x4e, (byte)0x01,
+            (byte)0x36,
+        };
+        serializeTest(odd, (short)0xd0ad);
+
+        // Large payload that causes 16-bit checksum overflow more than
+        // 255 times.
+        byte[] largeEven = new byte[1024];
+        Arrays.fill(largeEven, (byte)0xff);
+        serializeTest(largeEven, (short)0xb108);
+
+        byte[] largeOdd = new byte[1021];
+        Arrays.fill(largeOdd, (byte)0xff);
+        serializeTest(largeOdd, (short)0xb207);
+    }
 
-        short checksum = (short)0xe553;
-
-        // Create ICMP object
+    private void serializeTest(byte[] payload, short checksum)
+        throws PacketException {
         ICMP icmp = new ICMP();
-        icmp.setType((byte)8);
-        icmp.setCode((byte)0);
-        icmp.setIdentifier((short) 0x46f5);
-        icmp.setSequenceNumber((short) 2);
-        icmp.setRawPayload(icmpRawPayload);
-        //icmp.setChecksum(checksum);
+        icmp.setType((byte)8).setCode((byte)0).
+            setIdentifier((short)0x46f5).setSequenceNumber((short)2);
+        int payloadSize = 0;
+        if (payload != null) {
+            icmp.setRawPayload(payload);
+            payloadSize = payload.length;
+        }
 
         // Serialize
-        byte[] stream = icmp.serialize();
-        Assert.assertTrue(stream.length == 64);
+        byte[] data = icmp.serialize();
+        Assert.assertEquals(payloadSize + 8, data.length);
 
         // Deserialize
         ICMP icmpDes = new ICMP();
-        icmpDes.deserialize(stream, 0, stream.length);
+        icmpDes.deserialize(data, 0, data.length);
 
         Assert.assertFalse(icmpDes.isCorrupted());
-        Assert.assertTrue(icmpDes.getChecksum() == checksum);
-        Assert.assertTrue(icmp.equals(icmpDes));
+        Assert.assertEquals(checksum, icmpDes.getChecksum());
+        Assert.assertEquals(icmp, icmpDes);
     }
 }
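A quick arithmetic check on the new expected values (my own working, not taken from the patch): for serializeTest(null, (short)0xb108) the checksum covers only the eight header bytes, so the word sum is 0x0800 (type/code) + 0x46f5 (identifier) + 0x0002 (sequence number) = 0x4ef7, whose one's complement is 0xb108. The 1024-byte 0xff payload produces the same value because its 512 words of 0xffff fold away completely: 0x4ef7 + 0x1fffe00 = 0x2004cf7, and folding the carry gives 0x4cf7 + 0x200 = 0x4ef7 again.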
index f5298711b677a64ef9c80863580743ce2c394e52..b98342831cdf9256a01ae98698ae91babf0c2e97 100644 (file)
@@ -12,9 +12,9 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.Arrays;
 
-import junit.framework.Assert;
-
+import org.junit.Assert;
 import org.junit.Test;
+
 import org.opendaylight.controller.sal.match.Match;
 import org.opendaylight.controller.sal.match.MatchType;
 import org.opendaylight.controller.sal.utils.EtherTypes;
@@ -481,4 +481,200 @@ public class IPv4Test {
         Assert.assertEquals(protocol, (byte) match.getField(MatchType.NW_PROTO).getValue());
         Assert.assertEquals(tos, (byte) match.getField(MatchType.NW_TOS).getValue());
     }
+
+    @Test
+    public void testFragment() throws Exception {
+        byte[] payload1 = new byte[0];
+        byte[] payload2 = {
+            (byte)0x61, (byte)0xd1, (byte)0x3d, (byte)0x51,
+            (byte)0x1b, (byte)0x75, (byte)0xa7, (byte)0x83,
+        };
+        byte[] payload3 = {
+            (byte)0xe7, (byte)0x0f, (byte)0x2d, (byte)0x7e,
+            (byte)0x15, (byte)0xba, (byte)0xe7, (byte)0x6d,
+            (byte)0xb5, (byte)0xc5, (byte)0xb5, (byte)0x37,
+            (byte)0x59, (byte)0xbc, (byte)0x91, (byte)0x43,
+            (byte)0xb5, (byte)0xb7, (byte)0xe4, (byte)0x28,
+            (byte)0xec, (byte)0x62, (byte)0x6b, (byte)0x6a,
+            (byte)0xd1, (byte)0xcb, (byte)0x79, (byte)0x1e,
+            (byte)0xfc, (byte)0x82, (byte)0xf5, (byte)0xb4,
+        };
+
+        // Ensure that the payload is not deserialized if the fragment offset
+        // is not zero.
+        byte proto = IPProtocols.TCP.byteValue();
+        fragmentTest(payload1, proto, (short)0xf250);
+        fragmentTest(payload2, proto, (short)0xf248);
+        fragmentTest(payload3, proto, (short)0xf230);
+
+        proto = IPProtocols.UDP.byteValue();
+        fragmentTest(payload1, proto, (short)0xf245);
+        fragmentTest(payload2, proto, (short)0xf23d);
+        fragmentTest(payload3, proto, (short)0xf225);
+
+        proto = IPProtocols.ICMP.byteValue();
+        fragmentTest(payload1, proto, (short)0xf255);
+        fragmentTest(payload2, proto, (short)0xf24d);
+        fragmentTest(payload3, proto, (short)0xf235);
+
+        // Ensure that the protocol header in the first fragment is
+        // deserialized.
+        proto = IPProtocols.TCP.byteValue();
+        TCP tcp = new TCP();
+        tcp.setSourcePort((short)1234).setDestinationPort((short)32000).
+            setSequenceNumber((int)0xd541f5f8).setAckNumber((int)0x58da787d).
+            setDataOffset((byte)5).setReserved((byte)0).
+            setHeaderLenFlags((short)0x18).setWindowSize((short)0x40e8).
+            setUrgentPointer((short)0x15f7).setChecksum((short)0x0d4e);
+        firstFragmentTest(tcp, payload1, proto, (short)0xdfe6);
+        tcp.setChecksum((short)0xab2a);
+        firstFragmentTest(tcp, payload2, proto, (short)0xdfde);
+        tcp.setChecksum((short)0x1c75);
+        firstFragmentTest(tcp, payload3, proto, (short)0xdfc6);
+
+        proto = IPProtocols.UDP.byteValue();
+        UDP udp = new UDP();
+        udp.setSourcePort((short)53).setDestinationPort((short)45383).
+            setLength((short)(payload1.length + 8)).setChecksum((short)0);
+        firstFragmentTest(udp, payload1, proto, (short)0xdfe7);
+        udp.setLength((short)(payload2.length + 8));
+        firstFragmentTest(udp, payload2, proto, (short)0xdfdf);
+        udp.setLength((short)(payload3.length + 8));
+        firstFragmentTest(udp, payload3, proto, (short)0xdfc7);
+
+        proto = IPProtocols.ICMP.byteValue();
+        ICMP icmp = new ICMP();
+        icmp.setType((byte)8).setCode((byte)0).setIdentifier((short)0x3d1e).
+            setSequenceNumber((short)1);
+        firstFragmentTest(icmp, payload1, proto, (short)0xdff7);
+        firstFragmentTest(icmp, payload2, proto, (short)0xdfef);
+        firstFragmentTest(icmp, payload3, proto, (short)0xdfd7);
+    }
+
+    private void fragmentTest(byte[] payload, byte proto, short checksum)
+        throws Exception {
+        // Construct a fragmented raw IPv4 packet.
+        int ipv4Len = 20;
+        byte[] rawIp = new byte[ipv4Len + payload.length];
+
+        byte ipVersion = 4;
+        byte dscp = 35;
+        byte ecn = 2;
+        byte tos = (byte)((dscp << 2) | ecn);
+        short totalLen = (short)rawIp.length;
+        short id = 22143;
+        short offset = 0xb9;
+        byte ttl = 64;
+        byte[] srcIp = {(byte)0x0a, (byte)0x00, (byte)0x00, (byte)0x01};
+        byte[] dstIp = {(byte)0xc0, (byte)0xa9, (byte)0x66, (byte)0x23};
+
+        rawIp[0] = (byte)((ipVersion << 4) | (ipv4Len >> 2));
+        rawIp[1] = tos;
+        rawIp[2] = (byte)(totalLen >>> Byte.SIZE);
+        rawIp[3] = (byte)totalLen;
+        rawIp[4] = (byte)(id >>> Byte.SIZE);
+        rawIp[5] = (byte)id;
+        rawIp[6] = (byte)(offset >>> Byte.SIZE);
+        rawIp[7] = (byte)offset;
+        rawIp[8] = ttl;
+        rawIp[9] = proto;
+        rawIp[10] = (byte)(checksum >>> Byte.SIZE);
+        rawIp[11] = (byte)checksum;
+        System.arraycopy(srcIp, 0, rawIp, 12, srcIp.length);
+        System.arraycopy(dstIp, 0, rawIp, 16, dstIp.length);
+        System.arraycopy(payload, 0, rawIp, ipv4Len, payload.length);
+
+        // Deserialize.
+        IPv4 ipv4 = new IPv4();
+        ipv4.deserialize(rawIp, 0, rawIp.length * Byte.SIZE);
+
+        Assert.assertEquals(ipVersion, ipv4.getVersion());
+        Assert.assertEquals(ipv4Len, ipv4.getHeaderLen());
+        Assert.assertEquals(dscp, ipv4.getDiffServ());
+        Assert.assertEquals(ecn, ipv4.getECN());
+        Assert.assertEquals(totalLen, ipv4.getTotalLength());
+        Assert.assertEquals(id, ipv4.getIdentification());
+        Assert.assertEquals((byte)0, ipv4.getFlags());
+        Assert.assertEquals(offset, ipv4.getFragmentOffset());
+        Assert.assertEquals(ttl, ipv4.getTtl());
+        Assert.assertEquals(proto, ipv4.getProtocol());
+        Assert.assertEquals(checksum, ipv4.getChecksum());
+        Assert.assertEquals(NetUtils.byteArray4ToInt(srcIp),
+                            ipv4.getSourceAddress());
+        Assert.assertEquals(NetUtils.byteArray4ToInt(dstIp),
+                            ipv4.getDestinationAddress());
+        Assert.assertFalse(ipv4.isCorrupted());
+
+        // payloadClass should not be set if fragment offset is not zero.
+        Assert.assertEquals(null, ipv4.getPayload());
+        Assert.assertArrayEquals(payload, ipv4.getRawPayload());
+    }
+
+    private void firstFragmentTest(Packet payload, byte[] rawPayload,
+                                   byte proto, short checksum)
+        throws Exception {
+        // Construct a raw IPv4 packet with MF flag.
+        int ipv4Len = 20;
+        payload.setRawPayload(rawPayload);
+        byte[] payloadBytes = payload.serialize();
+        byte[] rawIp = new byte[ipv4Len + payloadBytes.length];
+
+        byte ipVersion = 4;
+        byte dscp = 13;
+        byte ecn = 1;
+        byte tos = (byte)((dscp << 2) | ecn);
+        short totalLen = (short)rawIp.length;
+        short id = 19834;
+        byte flags = 0x1;
+        short offset = 0;
+        short off = (short)(((short)flags << 13) | offset);
+        byte ttl = 64;
+        byte[] srcIp = {(byte)0xac, (byte)0x23, (byte)0x5b, (byte)0xfd};
+        byte[] dstIp = {(byte)0xc0, (byte)0xa8, (byte)0x64, (byte)0x71};
+
+        rawIp[0] = (byte)((ipVersion << 4) | (ipv4Len >> 2));
+        rawIp[1] = tos;
+        rawIp[2] = (byte)(totalLen >>> Byte.SIZE);
+        rawIp[3] = (byte)totalLen;
+        rawIp[4] = (byte)(id >>> Byte.SIZE);
+        rawIp[5] = (byte)id;
+        rawIp[6] = (byte)(off >>> Byte.SIZE);
+        rawIp[7] = (byte)off;
+        rawIp[8] = ttl;
+        rawIp[9] = proto;
+        rawIp[10] = (byte)(checksum >>> Byte.SIZE);
+        rawIp[11] = (byte)checksum;
+        System.arraycopy(srcIp, 0, rawIp, 12, srcIp.length);
+        System.arraycopy(dstIp, 0, rawIp, 16, dstIp.length);
+        System.arraycopy(payloadBytes, 0, rawIp, ipv4Len, payloadBytes.length);
+
+        // Deserialize.
+        IPv4 ipv4 = new IPv4();
+        ipv4.deserialize(rawIp, 0, rawIp.length * Byte.SIZE);
+
+        Assert.assertEquals(ipVersion, ipv4.getVersion());
+        Assert.assertEquals(ipv4Len, ipv4.getHeaderLen());
+        Assert.assertEquals(dscp, ipv4.getDiffServ());
+        Assert.assertEquals(ecn, ipv4.getECN());
+        Assert.assertEquals(totalLen, ipv4.getTotalLength());
+        Assert.assertEquals(id, ipv4.getIdentification());
+        Assert.assertEquals(flags, ipv4.getFlags());
+        Assert.assertEquals(offset, ipv4.getFragmentOffset());
+        Assert.assertEquals(ttl, ipv4.getTtl());
+        Assert.assertEquals(proto, ipv4.getProtocol());
+        Assert.assertEquals(checksum, ipv4.getChecksum());
+        Assert.assertEquals(NetUtils.byteArray4ToInt(srcIp),
+                            ipv4.getSourceAddress());
+        Assert.assertEquals(NetUtils.byteArray4ToInt(dstIp),
+                            ipv4.getDestinationAddress());
+        Assert.assertFalse(ipv4.isCorrupted());
+
+        // Protocol header in the first fragment should be deserialized.
+        Assert.assertEquals(null, ipv4.getRawPayload());
+
+        Packet desPayload = ipv4.getPayload();
+        Assert.assertEquals(payload, desPayload);
+        Assert.assertFalse(desPayload.isCorrupted());
+        Assert.assertArrayEquals(rawPayload, desPayload.getRawPayload());
+    }
 }
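
Both fragmentTest and firstFragmentTest above build the 20-byte IPv4 header by hand: byte 0 packs the version into the high nibble and the header length in 32-bit words into the low nibble, while bytes 6 and 7 carry the three flag bits above the 13-bit fragment offset. A minimal sketch of that packing follows; packHeaderBits is an illustrative name, not a helper from the test or the IPv4 class:

    // Illustrative packing of the IPv4 version/IHL byte and the
    // flags/fragment-offset word (flags in bits 15-13, offset in bits 12-0).
    static void packHeaderBits(byte[] hdr, int headerLenBytes,
                               byte flags, short fragOffset) {
        hdr[0] = (byte) ((4 << 4) | (headerLenBytes >> 2));
        short off = (short) ((flags << 13) | (fragOffset & 0x1fff));
        hdr[6] = (byte) (off >>> Byte.SIZE);
        hdr[7] = (byte) off;
    }

fragmentTest deliberately leaves the flags at zero with a non-zero offset, so IPv4.deserialize keeps the payload as raw bytes, while firstFragmentTest sets only the MF flag (0x1) with offset zero, so the embedded TCP, UDP, or ICMP header is deserialized.
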
index b0e87c48f3e0812fd2e33c7789e5e916479f6465..659ee7dd81ca83d91c013ceddb7017edca9a8b1b 100644 (file)
@@ -8,25 +8,6 @@
 
 package org.opendaylight.controller.topologymanager.internal;
 
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.util.ArrayList;
-import java.util.Dictionary;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.CopyOnWriteArraySet;
-import java.util.concurrent.LinkedBlockingQueue;
-
 import org.apache.commons.lang3.tuple.ImmutablePair;
 import org.apache.felix.dm.Component;
 import org.eclipse.osgi.framework.console.CommandInterpreter;
@@ -64,6 +45,25 @@ import org.osgi.framework.FrameworkUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CopyOnWriteArraySet;
+import java.util.concurrent.LinkedBlockingQueue;
+
 /**
  * The class describes TopologyManager which is the central repository of the
  * network topology. It provides service for applications to interact with
@@ -654,12 +654,14 @@ public class TopologyManagerImpl implements
             // all except the creation time stamp because that should
             // be set only when the edge is created
             TimeStamp timeStamp = null;
-            for (Property prop : oldProps) {
-                if (prop instanceof TimeStamp) {
-                    TimeStamp tsProp = (TimeStamp) prop;
-                    if (tsProp.getTimeStampName().equals("creation")) {
-                        timeStamp = tsProp;
-                        break;
+            if (oldProps != null) {
+                for (Property prop : oldProps) {
+                    if (prop instanceof TimeStamp) {
+                        TimeStamp tsProp = (TimeStamp) prop;
+                        if (tsProp.getTimeStampName().equals("creation")) {
+                            timeStamp = tsProp;
+                            break;
+                        }
                     }
                 }
             }
@@ -679,7 +681,9 @@ public class TopologyManagerImpl implements
                 if (prop instanceof TimeStamp) {
                     TimeStamp t = (TimeStamp) prop;
                     if (t.getTimeStampName().equals("creation")) {
-                        i.remove();
+                        if (timeStamp != null) {
+                            i.remove();
+                        }
                         break;
                     }
                 }
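
The two hunks above harden the CHANGED code path: oldProps can be null when a property update arrives for an edge the manager has not seen yet, and the creation TimeStamp supplied with the update should only be dropped when an existing one was found to take its place. A condensed sketch of the intended merge is shown below, assuming the preserved stamp is re-added by the surrounding code, which lies outside this hunk; the helper name mergeCreationTimeStamp and its standalone form are illustrative only:

    // Illustrative sketch only (assumes the usual java.util Set/Iterator and
    // sal.core Property/TimeStamp imports); not the class's actual code.
    private static void mergeCreationTimeStamp(Set<Property> oldProps,
                                               Set<Property> newProps) {
        TimeStamp existing = null;
        if (oldProps != null) {
            for (Property prop : oldProps) {
                if (prop instanceof TimeStamp
                        && "creation".equals(((TimeStamp) prop).getTimeStampName())) {
                    existing = (TimeStamp) prop;
                    break;
                }
            }
        }
        if (existing == null) {
            // Nothing to preserve: keep the creation stamp supplied with the update.
            return;
        }
        for (Iterator<Property> i = newProps.iterator(); i.hasNext();) {
            Property prop = i.next();
            if (prop instanceof TimeStamp
                    && "creation".equals(((TimeStamp) prop).getTimeStampName())) {
                i.remove();
                break;
            }
        }
        newProps.add(existing);
    }

bug1348FixTest below exercises the previously failing case: a CHANGED update for an edge the manager has never seen, carrying its own creation TimeStamp.
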
index fa01fa6a6025f1dc4da35e0bda80b43f77a0388a..d1338bf6953909aff8ff1c4bea274001f9135e5c 100644 (file)
@@ -8,21 +8,11 @@
 
 package org.opendaylight.controller.topologymanager.internal;
 
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentMap;
-
 import org.junit.Assert;
 import org.junit.Test;
 import org.opendaylight.controller.sal.core.Bandwidth;
 import org.opendaylight.controller.sal.core.ConstructionException;
+import org.opendaylight.controller.sal.core.Description;
 import org.opendaylight.controller.sal.core.Edge;
 import org.opendaylight.controller.sal.core.Host;
 import org.opendaylight.controller.sal.core.Latency;
@@ -32,6 +22,7 @@ import org.opendaylight.controller.sal.core.NodeConnector;
 import org.opendaylight.controller.sal.core.NodeConnector.NodeConnectorIDType;
 import org.opendaylight.controller.sal.core.Property;
 import org.opendaylight.controller.sal.core.State;
+import org.opendaylight.controller.sal.core.TimeStamp;
 import org.opendaylight.controller.sal.core.UpdateType;
 import org.opendaylight.controller.sal.packet.address.EthernetAddress;
 import org.opendaylight.controller.sal.topology.TopoEdgeUpdate;
@@ -47,6 +38,17 @@ import org.opendaylight.controller.switchmanager.Switch;
 import org.opendaylight.controller.switchmanager.SwitchConfig;
 import org.opendaylight.controller.topologymanager.TopologyUserLinkConfig;
 
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
 public class TopologyManagerImplTest {
     /**
      * Mockup of switch manager that only maintains existence of node
@@ -733,4 +735,35 @@ public class TopologyManagerImplTest {
 
         Assert.assertTrue(nodeNCmap.isEmpty());
     }
+
+    @Test
+    public void bug1348FixTest() throws ConstructionException {
+        TopologyManagerImpl topoManagerImpl = new TopologyManagerImpl();
+        TestSwitchManager swMgr = new TestSwitchManager();
+        topoManagerImpl.setSwitchManager(swMgr);
+        topoManagerImpl.nonClusterObjectCreate();
+
+        NodeConnector headnc1 = NodeConnectorCreator.createOFNodeConnector(
+                (short) 1, NodeCreator.createOFNode(1000L));
+        NodeConnector tailnc1 = NodeConnectorCreator.createOFNodeConnector(
+                (short) 2, NodeCreator.createOFNode(2000L));
+        Edge edge = new Edge(headnc1, tailnc1);
+        List<TopoEdgeUpdate> updatedEdges = new ArrayList<>();
+        Set<Property> edgeProps = new HashSet<>();
+        edgeProps.add(new TimeStamp(System.currentTimeMillis(), "creation"));
+        edgeProps.add(new Latency(Latency.LATENCY100ns));
+        edgeProps.add(new State(State.EDGE_UP));
+        edgeProps.add(new Bandwidth(Bandwidth.BW100Gbps));
+        edgeProps.add(new Description("Test edge"));
+        updatedEdges.add(new TopoEdgeUpdate(edge, edgeProps, UpdateType.CHANGED));
+
+        try {
+            topoManagerImpl.edgeUpdate(updatedEdges);
+        } catch (Exception e) {
+            Assert.fail("Exception was raised when trying to update edge properties: " + e.getMessage());
+        }
+
+        Assert.assertEquals(1, topoManagerImpl.getEdges().size());
+        Assert.assertNotNull(topoManagerImpl.getEdges().get(edge));
+    }
 }