Merge "Bug-1338: Create a grouping for order to help create generic OrderComparator...
author	Ed Warnicke <eaw@cisco.com>
Wed, 13 Aug 2014 14:40:16 +0000 (14:40 +0000)
committer	Gerrit Code Review <gerrit@opendaylight.org>
Wed, 13 Aug 2014 14:40:16 +0000 (14:40 +0000)
255 files changed:
features/config-netty/pom.xml
features/config-netty/src/main/resources/features.xml
features/config-persister/pom.xml
features/config-persister/src/main/resources/features.xml
features/config/pom.xml
features/config/src/main/resources/features.xml
features/flow/pom.xml [new file with mode: 0644]
features/flow/src/main/resources/features.xml [new file with mode: 0644]
features/mdsal/pom.xml
features/mdsal/src/main/resources/features.xml
features/netconf/pom.xml
features/netconf/src/main/resources/features.xml
features/pom.xml
features/protocol-framework/pom.xml
features/protocol-framework/src/main/resources/features.xml
opendaylight/commons/liblldp/pom.xml [new file with mode: 0644]
opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/BitBufferHelper.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/BufferException.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/ConstructionException.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/DataLinkAddress.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/EtherTypes.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/Ethernet.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/EthernetAddress.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/HexEncode.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/LLDP.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/LLDPTLV.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/NetUtils.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/Packet.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/PacketException.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/test/java/org/opendaylight/controller/sal/packet/BitBufferHelperTest.java [new file with mode: 0644]
opendaylight/commons/liblldp/src/test/java/org/opendaylight/controller/sal/packet/address/EthernetAddressTest.java [new file with mode: 0644]
opendaylight/commons/opendaylight/pom.xml
opendaylight/config/config-manager/src/main/java/org/opendaylight/controller/config/manager/impl/osgi/ConfigManagerActivator.java
opendaylight/config/config-persister-api/src/main/java/org/opendaylight/controller/config/persist/api/ConfigPusher.java [new file with mode: 0644]
opendaylight/config/config-persister-feature-adapter/pom.xml [new file with mode: 0644]
opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/ConfigPusherFeatureActivator.java [new file with mode: 0644]
opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/AbstractFeatureWrapper.java [new file with mode: 0644]
opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/ChildAwareFeatureWrapper.java [new file with mode: 0644]
opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/ConfigFeaturesListener.java [new file with mode: 0644]
opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/ConfigPusherCustomizer.java [new file with mode: 0644]
opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/ConfigPushingRunnable.java [new file with mode: 0644]
opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/FeatureConfigPusher.java [new file with mode: 0644]
opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/FeatureConfigSnapshotHolder.java [new file with mode: 0644]
opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/FeatureServiceCustomizer.java [new file with mode: 0644]
opendaylight/config/pom.xml
opendaylight/distribution/opendaylight-karaf/pom.xml
opendaylight/distribution/opendaylight-karaf/src/main/resources/configuration/initial/00-netty.xml [deleted file]
opendaylight/distribution/opendaylight-karaf/src/main/resources/configuration/initial/01-md-sal.xml [deleted file]
opendaylight/distribution/opendaylight-karaf/src/main/resources/configuration/initial/03-toaster-sample.xml [deleted file]
opendaylight/distribution/opendaylight-karaf/src/main/resources/configuration/logback.xml
opendaylight/distribution/opendaylight-karaf/src/main/resources/etc/custom.properties
opendaylight/distribution/opendaylight-karaf/src/main/resources/etc/jre.properties
opendaylight/distribution/opendaylight/pom.xml
opendaylight/distribution/opendaylight/src/main/resources/configuration/logback.xml
opendaylight/md-sal/md-sal-config/src/main/resources/initial/01-md-sal.xml
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/AbstractRaftActorBehavior.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Candidate.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Follower.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/Leader.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/behaviors/LeaderTest.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/AbstractReadWriteTransaction.java
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/ForwardedBackwardsCompatibleDataBrokerTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/test/DataBrokerTestCustomizer.java
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/sal/binding/test/util/BindingTestContext.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/PathUtils.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/protobuff/messages/transaction/ShardTransactionMessages.java
opendaylight/md-sal/sal-clustering-commons/src/main/resources/ShardTransaction.proto
opendaylight/md-sal/sal-clustering-commons/src/test/java/org/opendaylight/controller/cluster/datastore/node/NormalizedNodeToNodeCodecTest.java
opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/05-clustering.xml.conf
opendaylight/md-sal/sal-clustering-config/src/main/resources/initial/akka.conf
opendaylight/md-sal/sal-distributed-datastore/pom.xml
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractUntypedActor.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ClusterWrapperImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/CompositeModificationPayload.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ConfigurationImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListener.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardManager.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardReadTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardReadWriteTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardTransactionChain.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardWriteTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohort.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/exceptions/UnknownMessageException.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardIdentifier.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardManagerIdentifier.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardTransactionIdentifier.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/TransactionIdentifier.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/AbstractBaseMBean.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardMBeanFactory.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStats.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStatsMBean.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shardmanager/ShardManagerInfo.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shardmanager/ShardManagerInfoMBean.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/AbortTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CanCommitTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataChangeListenerRegistration.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CommitTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CreateTransactionChain.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExists.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExistsReply.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PeerAddressResolved.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PreCommitTransaction.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/PreCommitTransactionReply.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/InstanceIdentifierUtils.java
opendaylight/md-sal/sal-distributed-datastore/src/main/resources/application.conf
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/BasicIntegrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ConfigurationImplTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerRegistrationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/DistributedDataStoreTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionChainTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionFailureTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionProxyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardIdentifierTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardManagerIdentifierTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shard/ShardStatsTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/modification/AbstractModificationTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/resources/application.conf
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataReadTransaction.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/RpcImplementationUnavailableException.java [new file with mode: 0644]
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/config/yang/md/sal/dom/impl/DomInmemoryDataBrokerModule.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMDataCommitCoordinatorImpl.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadOnlyTransaction.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMForwardedReadWriteTransaction.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/BackwardsCompatibleMountPoint.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/RoutedRpcSelector.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/sal/dom/broker/impl/SchemaAwareRpcBroker.java
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMBrokerPerformanceTest.java
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMBrokerTest.java
opendaylight/md-sal/sal-dom-broker/src/test/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMTransactionChainTest.java
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/DOMStoreReadTransaction.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/config/yang/inmemory_datastore_provider/InMemoryConfigDataStoreProviderModule.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/config/yang/inmemory_datastore_provider/InMemoryOperationalDataStoreProviderModule.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/ChangeListenerNotifyTask.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStore.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreFactory.java [new file with mode: 0644]
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/ResolveDataChangeEventsTask.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedReadTransaction.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedReadWriteTransaction.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/tree/ListenerTree.java
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/AbstractDataChangeListenerTest.java
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/DatastoreTestTask.java
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/DefaultDataChangeListenerTestSuite.java
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDataStoreTest.java
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/RootScopeSubtreeTest.java
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/SchemaUpdateForTransactionTest.java
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/TestDCLExecutorService.java [new file with mode: 0644]
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/WildcardedScopeBaseTest.java
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/WildcardedScopeOneTest.java
opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/WildcardedScopeSubtreeTest.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/config/yang/md/sal/connector/netconf/NetconfConnectorModule.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/config/yang/md/sal/connector/netconf/NetconfConnectorModuleFactory.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/api/RemoteDeviceHandler.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/NetconfDevice.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/NetconfStateSchemas.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/NotificationHandler.java [new file with mode: 0644]
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/listener/NetconfDeviceCommunicator.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/sal/NetconfDeviceSalFacade.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/sal/tx/NetconfDeviceReadOnlyTx.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/sal/tx/NetconfDeviceReadWriteTx.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/schema/NetconfDeviceSchemaProviderFactory.java [deleted file]
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/schema/NetconfRemoteSchemaSourceProvider.java [deleted file]
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/schema/NetconfRemoteSchemaYangSourceProvider.java [new file with mode: 0644]
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/util/NetconfMessageTransformUtil.java
opendaylight/md-sal/sal-netconf-connector/src/test/java/org/opendaylight/controller/sal/connect/netconf/NetconfDeviceTest.java
opendaylight/md-sal/sal-remoterpc-connector/pom.xml
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/RpcManager.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RoutingTable.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RoutingTableOld.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistry.java
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistryOld.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Bucket.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketImpl.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStore.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Copier.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Messages.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/RpcBrokerTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/RoutingTableOldTest.java [moved from opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/RoutingTableTest.java with 97% similarity]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistryOldTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistryTest.java
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStoreTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/gossip/GossiperTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-remoterpc-connector/src/test/resources/application.conf [new file with mode: 0644]
opendaylight/md-sal/sal-rest-connector-config/pom.xml
opendaylight/md-sal/sal-rest-connector-config/src/main/resources/initial/10-rest-connector.xml
opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/restconf/impl/BrokerFacade.java
opendaylight/md-sal/sal-rest-connector/src/main/java/org/opendaylight/controller/sal/restconf/impl/RestconfImpl.java
opendaylight/md-sal/sal-rest-connector/src/test/java/org/opendaylight/controller/sal/restconf/impl/test/BrokerFacadeTest.java
opendaylight/md-sal/sal-rest-docgen/pom.xml
opendaylight/md-sal/sal-rest-docgen/src/main/java/org/opendaylight/controller/sal/rest/doc/impl/ApiDocGenerator.java
opendaylight/md-sal/sal-rest-docgen/src/main/java/org/opendaylight/controller/sal/rest/doc/impl/BaseYangSwaggerGenerator.java
opendaylight/md-sal/sal-rest-docgen/src/main/java/org/opendaylight/controller/sal/rest/doc/impl/ModelGenerator.java
opendaylight/md-sal/sal-rest-docgen/src/main/java/org/opendaylight/controller/sal/rest/doc/mountpoints/MountPointSwagger.java
opendaylight/md-sal/sal-rest-docgen/src/main/java/org/opendaylight/controller/sal/rest/doc/util/RestDocgenUtil.java [new file with mode: 0644]
opendaylight/md-sal/sal-rest-docgen/src/test/java/org/opendaylight/controller/sal/rest/doc/impl/ApiDocGeneratorTest.java
opendaylight/md-sal/sal-rest-docgen/src/test/java/org/opendaylight/controller/sal/rest/doc/impl/DocGenTestHelper.java
opendaylight/md-sal/sal-rest-docgen/src/test/java/org/opendaylight/controller/sal/rest/doc/impl/MountPointSwaggerTest.java
opendaylight/md-sal/sal-rest-docgen/src/test/resources/yang/toaster_augmented.yang [new file with mode: 0644]
opendaylight/md-sal/sal-rest-docgen/src/test/resources/yang/toaster_short.yang
opendaylight/md-sal/topology-lldp-discovery/pom.xml
opendaylight/md-sal/topology-lldp-discovery/src/main/java/org/opendaylight/md/controller/topology/lldp/utils/LLDPDiscoveryUtils.java
opendaylight/netconf/config-persister-impl/src/main/java/org/opendaylight/controller/netconf/persist/impl/ConfigPusherImpl.java [moved from opendaylight/netconf/config-persister-impl/src/main/java/org/opendaylight/controller/netconf/persist/impl/ConfigPusher.java with 85% similarity]
opendaylight/netconf/config-persister-impl/src/main/java/org/opendaylight/controller/netconf/persist/impl/osgi/ConfigPersisterActivator.java
opendaylight/netconf/config-persister-impl/src/test/java/org/opendaylight/controller/netconf/persist/impl/osgi/MockedBundleContext.java
opendaylight/netconf/netconf-cli/src/main/java/org/opendaylight/controller/netconf/cli/NetconfDeviceConnectionHandler.java
opendaylight/netconf/pom.xml
opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/Activator.java
opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerHealthMonitorInterface.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerInterface.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerListenerInterface.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerPoolInterface.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerPoolMemberInterface.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerAware.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerCRUD.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerHealthMonitorAware.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerHealthMonitorCRUD.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerListenerAware.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerListenerCRUD.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolAware.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolCRUD.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolMemberAware.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolMemberCRUD.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolMemberRequest.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronCRUDInterfaces.java
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancer.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerHealthMonitor.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerListener.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPool.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPoolMember.java [new file with mode: 0644]
opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronPort.java
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/INeutronLoadBalancerPoolMemberRequest.java [new file with mode: 0644]
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerHealthMonitorNorthbound.java [new file with mode: 0644]
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerHealthMonitorRequest.java [new file with mode: 0644]
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerListenerNorthbound.java [new file with mode: 0644]
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerListenerRequest.java [new file with mode: 0644]
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerNorthbound.java [new file with mode: 0644]
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolMembersNorthbound.java [new file with mode: 0644]
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolNorthbound.java [new file with mode: 0644]
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolRequest.java [new file with mode: 0644]
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerRequest.java [new file with mode: 0644]
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronNorthboundRSApplication.java
opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronRoutersNorthbound.java
pom.xml

diff --git a/features/config-netty/pom.xml b/features/config-netty/pom.xml
index 16fd975130592a1975332b6b390dbce2f9f1d614..2f4b4b1e2198bb4e85cd1c66f19fb6e009cf4626 100644 (file)
@@ -7,7 +7,7 @@
     <version>0.2.5-SNAPSHOT</version>
     <relativePath>../../opendaylight/config/</relativePath>
   </parent>
-  <artifactId>config-netty-features</artifactId>
+  <artifactId>features-config-netty</artifactId>
 
   <packaging>pom</packaging>
 
   <dependencies>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>config-persister-features</artifactId>
+      <artifactId>features-config-persister</artifactId>
       <classifier>features</classifier>
       <type>xml</type>
       <scope>runtime</scope>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netty-event-executor-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netty-threadgroup-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netty-timer-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>threadpool-config-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>threadpool-config-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-netty-config</artifactId>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/features/config-netty/src/main/resources/features.xml b/features/config-netty/src/main/resources/features.xml
index f1b2d1f753f66cf83dd03ddca24cb6155fe7ab33..7f57d8cb84cbb3b5d103799646c8ea787f8e0d88 100644 (file)
@@ -3,7 +3,7 @@
 <features name="odl-config-persister-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
           xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
-  <repository>mvn:org.opendaylight.controller/config-persister-features/${config.version}/xml/features</repository>
+  <repository>mvn:org.opendaylight.controller/features-config-persister/${config.version}/xml/features</repository>
   <feature name='odl-config-netty' version='${project.version}'>
     <feature version='${project.version}'>odl-config-netty-config-api</feature>
     <bundle>mvn:org.opendaylight.controller/netty-event-executor-config/${project.version}</bundle>
@@ -12,6 +12,6 @@
     <bundle>mvn:org.opendaylight.controller/threadpool-config-api/${project.version}</bundle>
     <bundle>mvn:org.opendaylight.controller/threadpool-config-impl/${project.version}</bundle>
     <feature version='${project.version}'>odl-config-startup</feature>
-    <configfile finalname="configuration/initial/00-netty.xml">mvn:org.opendaylight.controller/config-netty-config/${config.version}/xml/config</configfile>
+    <configfile finalname="${config.configfile.directory}/${config.netty.configfile}">mvn:org.opendaylight.controller/config-netty-config/${config.version}/xml/config</configfile>
   </feature>
 </features>
\ No newline at end of file
diff --git a/features/config-persister/pom.xml b/features/config-persister/pom.xml
index ec1520ed9818eb573942cde1ac6561110d2cb44f..6dc894134546fabde2a2d7ef2bf71d0673fab5b0 100644 (file)
@@ -7,7 +7,7 @@
     <version>0.2.5-SNAPSHOT</version>
     <relativePath>../../opendaylight/config/</relativePath>
   </parent>
-  <artifactId>config-persister-features</artifactId>
+  <artifactId>features-config-persister</artifactId>
 
   <packaging>pom</packaging>
 
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>netconf-features</artifactId>
+      <artifactId>features-netconf</artifactId>
       <classifier>features</classifier>
       <type>xml</type>
       <scope>runtime</scope>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>config-features</artifactId>
+      <artifactId>features-config</artifactId>
       <classifier>features</classifier>
       <type>xml</type>
       <scope>runtime</scope>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-persister-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-persister-file-xml-adapter</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-persister-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-persister-feature-adapter</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-mapping-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.persistence</groupId>
+      <artifactId>org.eclipse.persistence.core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.eclipse.persistence</groupId>
+      <artifactId>org.eclipse.persistence.moxy</artifactId>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/features/config-persister/src/main/resources/features.xml b/features/config-persister/src/main/resources/features.xml
index 2273a4a3091db030bfd967e434234066c6ffcc62..a3c005b3bd8937425420c1eff652e89262657225 100644 (file)
@@ -4,22 +4,20 @@
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
           xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
   <repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
-  <repository>mvn:org.opendaylight.controller/netconf-features/${netconf.version}/xml/features</repository>
-  <repository>mvn:org.opendaylight.controller/config-features/${config.version}/xml/features</repository>
-  <feature name='odl-config-startup' version='${project.version}'>
-    <feature version='${project.version}'>odl-config-netconf-connector</feature>
+  <repository>mvn:org.opendaylight.controller/features-netconf/${netconf.version}/xml/features</repository>
+  <repository>mvn:org.opendaylight.controller/features-config/${config.version}/xml/features</repository>
+  <feature name='odl-config-all' version='${project.version}'>
     <feature version='${project.version}'>odl-config-persister</feature>
-    <feature version='${project.version}'>odl-netconf-impl</feature>
+    <feature version='${project.version}'>odl-config-startup</feature>
   </feature>
   <feature name='odl-config-persister' version='${project.version}'>
     <feature version='${netconf.version}'>odl-netconf-api</feature>
     <feature version='${project.version}'>odl-config-api</feature>
-    <feature version='${yangtools.version}'>yangtools-binding-generator</feature>
+    <feature version='${yangtools.version}'>odl-yangtools-binding-generator</feature>
     <bundle>mvn:org.opendaylight.controller/config-persister-api/${project.version}</bundle>
     <bundle>mvn:org.opendaylight.controller/config-persister-file-xml-adapter/${project.version}</bundle>
-    <bundle>mvn:org.opendaylight.controller/config-persister-directory-xml-adapter/${project.version}</bundle>
     <bundle>mvn:org.opendaylight.controller/config-persister-impl/${project.version}</bundle>
-
+    <bundle>mvn:org.opendaylight.controller/config-persister-feature-adapter/${project.version}</bundle>
     <bundle>mvn:org.opendaylight.controller/netconf-util/${netconf.version}</bundle>
     <bundle>mvn:org.opendaylight.controller/netconf-mapping-api/${netconf.version}</bundle>
 
@@ -29,4 +27,9 @@
     <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.core/${eclipse.persistence.version}</bundle>
     <bundle>mvn:org.eclipse.persistence/org.eclipse.persistence.moxy/${eclipse.persistence.version}</bundle>
   </feature>
-</features>
\ No newline at end of file
+  <feature name='odl-config-startup' version='${project.version}'>
+    <feature version='${project.version}'>odl-config-netconf-connector</feature>
+    <feature version='${project.version}'>odl-config-persister</feature>
+    <feature version='${project.version}'>odl-netconf-impl</feature>
+  </feature>
+</features>
diff --git a/features/config/pom.xml b/features/config/pom.xml
index 7e5dd6472bc7320a6b7c20953c811fb41982cb26..c69e11bed2c91e2a08d936dbffad02c601b12d7f 100644 (file)
@@ -7,7 +7,7 @@
     <version>0.2.5-SNAPSHOT</version>
     <relativePath>../../opendaylight/config/</relativePath>
   </parent>
-  <artifactId>config-features</artifactId>
+  <artifactId>features-config</artifactId>
 
   <packaging>pom</packaging>
 
       <type>xml</type>
       <scope>runtime</scope>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-common-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-common-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-common-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netty-config-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-transport</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-buffer</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>yang-jmx-generator</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>shutdown-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>shutdown-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.osgi</groupId>
+      <artifactId>org.osgi.core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.javassist</groupId>
+      <artifactId>javassist</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-manager</artifactId>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/features/config/src/main/resources/features.xml b/features/config/src/main/resources/features.xml
index de5b198173f6c88da2c5983c2b2daae9c989ea6d..6c0d32427da9508ccd39463e89e716e603bd2101 100644 (file)
@@ -5,45 +5,52 @@
           xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
   <repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
 
-  <feature name='odl-config-core' version='${project.version}'>
-    <feature version='${yangtools.version}'>yangtools-concepts</feature>
-    <feature version='${yangtools.version}'>yangtools-binding</feature>
-    <feature version='${yangtools.version}'>yangtools-binding-generator</feature>
-    <feature version='${mdsal.version}'>odl-mdsal-commons</feature>
-    <feature version='${project.version}'>odl-config-api</feature>
-    <bundle>mvn:org.opendaylight.controller/config-util/${project.version}</bundle>
-    <bundle>mvn:org.opendaylight.controller/yang-jmx-generator/${project.version}</bundle>
-    <bundle>mvn:org.opendaylight.controller/shutdown-api/${project.version}</bundle>
-    <bundle>mvn:org.opendaylight.controller/shutdown-impl/${project.version}</bundle>
-    <bundle>mvn:org.osgi/org.osgi.core/${osgi.core.version}</bundle>
-    <bundle>mvn:com.google.guava/guava/${guava.version}</bundle>
-    <bundle>mvn:org.javassist/javassist/${javassist.version}</bundle>
+  <feature name='odl-config-all' version='${project.version}'>
+    <feature version='${project.version}'>odl-mdsal-common</feature>
+      <feature version='${project.version}'>odl-config-api</feature>
+      <feature version='${project.version}'>odl-config-netty-config-api</feature>
+      <feature version='${project.version}'>odl-config-core</feature>
+      <feature version='${project.version}'>odl-config-manager</feature>
   </feature>
-  <feature name='odl-config-manager' version='${project.version}'>
-    <feature version='${project.version}'>odl-config-core</feature>
-    <bundle>mvn:org.opendaylight.controller/config-manager/${project.version}</bundle>
+
+  <feature name='odl-mdsal-common' version='${mdsal.version}'>
+      <feature version='${yangtools.version}'>odl-yangtools-data-binding</feature>
+      <bundle>mvn:org.opendaylight.controller/sal-common/${mdsal.version}</bundle>
+      <bundle>mvn:org.opendaylight.controller/sal-common-api/${mdsal.version}</bundle>
+      <bundle>mvn:org.opendaylight.controller/sal-common-impl/${mdsal.version}</bundle>
+      <bundle>mvn:org.opendaylight.controller/sal-common-util/${mdsal.version}</bundle>
   </feature>
 
   <feature name='odl-config-api' version='${project.version}'>
     <bundle>mvn:org.opendaylight.controller/config-api/${project.version}</bundle>
-
-    <!-- yangtools features -->
-    <feature version='${yangtools.version}'>yangtools-concepts</feature>
-    <feature version='${yangtools.version}'>yangtools-binding</feature>
+    <feature version='${yangtools.version}'>odl-yangtools-common</feature>
+    <feature version='${yangtools.version}'>odl-yangtools-binding</feature>
   </feature>
 
   <feature name='odl-config-netty-config-api' version='${project.version}'>
+    <feature version='${project.version}'>odl-config-api</feature>
     <bundle>mvn:org.opendaylight.controller/netty-config-api/${project.version}</bundle>
-
-    <!-- netty bundles -->
     <bundle>mvn:io.netty/netty-transport/${netty.version}</bundle>
     <bundle>mvn:io.netty/netty-common/${netty.version}</bundle>
     <bundle>mvn:io.netty/netty-buffer/${netty.version}</bundle>
+  </feature>
 
+  <feature name='odl-config-core' version='${project.version}'>
+    <feature version='${yangtools.version}'>odl-yangtools-common</feature>
+    <feature version='${yangtools.version}'>odl-yangtools-binding</feature>
+    <feature version='${yangtools.version}'>odl-yangtools-binding-generator</feature>
+    <feature version='${mdsal.version}'>odl-mdsal-common</feature>
     <feature version='${project.version}'>odl-config-api</feature>
+    <bundle>mvn:org.opendaylight.controller/config-util/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller/yang-jmx-generator/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller/shutdown-api/${project.version}</bundle>
+    <bundle>mvn:org.opendaylight.controller/shutdown-impl/${project.version}</bundle>
+    <bundle>mvn:org.osgi/org.osgi.core/${osgi.core.version}</bundle>
+    <bundle>mvn:com.google.guava/guava/${guava.version}</bundle>
+    <bundle>mvn:org.javassist/javassist/${javassist.version}</bundle>
   </feature>
-  <feature name='odl-config-dispatcher' version='${project.version}'>
-      <bundle>mvn:org.opendaylight.controller/netconf-config-dispatcher/${project.version}</bundle>
+  <feature name='odl-config-manager' version='${project.version}'>
+    <feature version='${project.version}'>odl-config-core</feature>
+    <bundle>mvn:org.opendaylight.controller/config-manager/${project.version}</bundle>
   </feature>
-
 </features>
\ No newline at end of file
diff --git a/features/flow/pom.xml b/features/flow/pom.xml
new file mode 100644 (file)
index 0000000..09bb6c9
--- /dev/null
@@ -0,0 +1,120 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>sal-parent</artifactId>
+    <version>1.1-SNAPSHOT</version>
+    <relativePath>../../opendaylight/md-sal</relativePath>
+  </parent>
+  <artifactId>features-flow</artifactId>
+
+  <packaging>pom</packaging>
+
+  <properties>
+    <features.file>features.xml</features.file>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>features-mdsal</artifactId>
+      <version>${mdsal.version}</version>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.model</groupId>
+      <artifactId>model-flow-base</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.model</groupId>
+      <artifactId>model-flow-service</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.model</groupId>
+      <artifactId>model-flow-statistics</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.model</groupId>
+      <artifactId>model-inventory</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.model</groupId>
+      <artifactId>model-topology</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.md</groupId>
+      <artifactId>topology-manager</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.md</groupId>
+      <artifactId>topology-lldp-discovery</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.md</groupId>
+      <artifactId>statistics-manager</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.md</groupId>
+      <artifactId>inventory-manager</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.md</groupId>
+      <artifactId>forwardingrules-manager</artifactId>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <resources>
+      <resource>
+        <filtering>true</filtering>
+        <directory>src/main/resources</directory>
+      </resource>
+    </resources>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-resources-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>filter</id>
+            <goals>
+              <goal>resources</goal>
+            </goals>
+            <phase>generate-resources</phase>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>attach-artifacts</id>
+            <goals>
+              <goal>attach-artifact</goal>
+            </goals>
+            <phase>package</phase>
+            <configuration>
+              <artifacts>
+                <artifact>
+                  <file>${project.build.directory}/classes/${features.file}</file>
+                  <type>xml</type>
+                  <classifier>features</classifier>
+                </artifact>
+              </artifacts>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+  <scm>
+    <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+    <tag>HEAD</tag>
+    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL</url>
+  </scm>
+</project>
diff --git a/features/flow/src/main/resources/features.xml b/features/flow/src/main/resources/features.xml
new file mode 100644 (file)
index 0000000..3f914be
--- /dev/null
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<features name="odl-flow-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
+   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+   xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
+    <repository>mvn:org.opendaylight.controller/features-mdsal/${mdsal.version}/xml/features</repository>
+    <feature name='odl-flow-model' version='${project.version}'>
+        <feature version='${yangtools.version}'>odl-yangtools-models</feature>
+        <bundle>mvn:org.opendaylight.controller.model/model-flow-base/${project.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller.model/model-flow-service/${project.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller.model/model-flow-statistics/${project.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller.model/model-inventory/${project.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller.model/model-topology/${project.version}</bundle>
+    </feature>
+    <feature name='odl-flow-services' version='${project.version}'>
+        <feature version='${project.version}'>odl-mdsal-broker</feature>
+        <feature version='${project.version}'>odl-flow-model</feature>
+        <bundle>mvn:org.opendaylight.controller.md/topology-manager/${project.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller.md/topology-lldp-discovery/${project.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller.md/statistics-manager/${project.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller.md/inventory-manager/${project.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller.md/forwardingrules-manager/${project.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller/liblldp/${sal.version}</bundle>
+    </feature>
+
+</features>
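For context, the odl-flow-services feature defined above can be installed from an OSGi bundle through Karaf's FeaturesService. The sketch below is illustrative only; the class name and error handling are assumptions, not part of this change, while the feature name comes from the definition above.

import org.apache.karaf.features.FeaturesService;
import org.osgi.framework.BundleContext;
import org.osgi.framework.ServiceReference;

// Hypothetical helper showing how a bundle could install the feature defined above.
public final class FlowFeatureInstaller {
    private FlowFeatureInstaller() {
    }

    public static void installFlowServices(BundleContext context) throws Exception {
        // Look up Karaf's FeaturesService from the OSGi service registry.
        ServiceReference<FeaturesService> ref = context.getServiceReference(FeaturesService.class);
        if (ref == null) {
            throw new IllegalStateException("FeaturesService is not available");
        }
        FeaturesService featuresService = context.getService(ref);
        try {
            // Pulls in odl-flow-model (and its odl-mdsal-broker dependency) transitively.
            featuresService.installFeature("odl-flow-services");
        } finally {
            context.ungetService(ref);
        }
    }
}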
index 2983c5efab8fb21031991fe6af248032eb842365..4f1ba98e5c5d4f60cc4d0a5a2fcbe7c06706d429 100644 (file)
@@ -7,7 +7,7 @@
     <version>1.1-SNAPSHOT</version>
     <relativePath>../../opendaylight/md-sal</relativePath>
   </parent>
-  <artifactId>mdsal-features</artifactId>
+  <artifactId>features-mdsal</artifactId>
 
   <packaging>pom</packaging>
 
     <features.file>features.xml</features.file>
   </properties>
 
-  <dependencies></dependencies>
+  <dependencies>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>features-yangtools</artifactId>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>features-config</artifactId>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>features-config-persister</artifactId>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>features-config-netty</artifactId>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-core-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-core-spi</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-broker-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-broker-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-binding-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-connector-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-inmemory-datastore</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>md-sal-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-netconf-connector</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.model</groupId>
+      <artifactId>model-inventory</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-config-dispatcher</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-connector-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-rest-connector</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.code.gson</groupId>
+      <artifactId>gson</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.sun.jersey</groupId>
+      <artifactId>jersey-server</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.thirdparty</groupId>
+      <artifactId>com.sun.jersey.jersey-servlet</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-buffer</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-codec</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-codec-http</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-handler</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-transport</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-remote</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>sal-rest-connector-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>sample-toaster</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>sample-toaster-provider</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>sample-toaster-consumer</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.samples</groupId>
+      <artifactId>toaster-config</artifactId>
+    </dependency>
+  </dependencies>
 
   <build>
     <resources>
index 7d393bc64cdc7aab9056043d8b1f852260dcfe27..a3d7ed0f83df6455006a4c94dbb52283d1438461 100644 (file)
@@ -1,28 +1,24 @@
 <?xml version="1.0" encoding="UTF-8"?>
 
-<features name="mdsal-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
+<features name="odl-mdsal-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
+    <repository>mvn:org.opendaylight.yangtools/features-yangtools/${yangtools.version}/xml/features</repository>
+    <repository>mvn:org.opendaylight.controller/features-config/${config.version}/xml/features</repository>
+    <repository>mvn:org.opendaylight.controller/features-config-persister/${config.version}/xml/features</repository>
+    <repository>mvn:org.opendaylight.controller/features-config-netty/${config.version}/xml/features</repository>
     <feature name='odl-mdsal-all' version='${project.version}'>
-        <feature version='${project.version}'>odl-mdsal-commons</feature>
         <feature version='${project.version}'>odl-mdsal-broker</feature>
-        <feature version='${project.version}'>odl-mdsal-restconf</feature>
-    </feature>
-    <feature name='odl-mdsal-commons' version='${project.version}'>
-        <feature version='${yangtools.version}'>yangtools-data-binding</feature>
-        <bundle>mvn:org.opendaylight.controller/sal-common/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller/sal-common-api/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller/sal-common-impl/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller/sal-common-util/${project.version}</bundle>
+        <feature version='${project.version}'>odl-mdsal-netconf-connector</feature>
+        <feature version='${project.version}'>odl-restconf</feature>
+        <feature version='${project.version}'>odl-toaster</feature>
     </feature>
     <feature name='odl-mdsal-broker' version='${project.version}'>
-        <feature version='${yangtools.version}'>yangtools-concepts</feature>
-        <feature version='${yangtools.version}'>yangtools-binding</feature>
-        <feature version='${mdsal.version}'>odl-mdsal-commons</feature>
-        <feature version='${config.version}'>odl-config-core</feature>
-        <feature version='${config.version}'>odl-config-manager</feature>
-        <feature version='${config.version}'>odl-config-api</feature>
-        <feature version='${config.version}'>odl-config-persister</feature>
+        <feature version='${yangtools.version}'>odl-yangtools-common</feature>
+        <feature version='${yangtools.version}'>odl-yangtools-binding</feature>
+        <feature version='${mdsal.version}'>odl-mdsal-common</feature>
+        <feature version='${config.version}'>odl-config-startup</feature>
+        <feature version='${config.version}'>odl-config-netty</feature>
         <bundle>mvn:org.opendaylight.controller/sal-core-api/${project.version}</bundle>
         <bundle>mvn:org.opendaylight.controller/sal-core-spi/${project.version}</bundle>
         <bundle>mvn:org.opendaylight.controller/sal-broker-impl/${project.version}</bundle>
         <bundle>mvn:org.opendaylight.controller/sal-binding-util/${project.version}</bundle>
         <bundle>mvn:org.opendaylight.controller/sal-connector-api/${project.version}</bundle>
         <bundle>mvn:org.opendaylight.controller/sal-inmemory-datastore/${project.version}</bundle>
+        <configfile finalname="${config.configfile.directory}/${config.mdsal.configfile}">mvn:org.opendaylight.controller/md-sal-config/${mdsal.version}/xml/config</configfile>
+    </feature>
+    <feature name='odl-mdsal-netconf-connector' version='${project.version}'>
+        <feature version='${project.version}'>odl-mdsal-broker</feature>
+        <feature version='${netconf.version}'>odl-netconf-client</feature>
+        <feature version='${yangtools.version}'>odl-yangtools-models</feature>
+        <bundle>mvn:org.opendaylight.controller/sal-netconf-connector/${project.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller.model/model-inventory/${project.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller/netconf-config-dispatcher/${config.version}</bundle>
+        <configfile finalname="${config.configfile.directory}/${config.netconf.connector.configfile}">mvn:org.opendaylight.controller/netconf-connector-config/${netconf.version}/xml/config</configfile>
     </feature>
-    <feature name='odl-mdsal-restconf' version='${project.version}'>
+    <feature name='odl-restconf' version='${project.version}'>
         <feature version='${mdsal.version}'>odl-mdsal-broker</feature>
+        <feature>war</feature>
         <bundle>mvn:org.opendaylight.controller/sal-rest-connector/${project.version}</bundle>
-        <bundle>wrap:mvn:com.google.code.gson/gson/${gson.version}</bundle>
-        <bundle>wrap:mvn:com.sun.jersey/jersey-core/${jersey.version}</bundle>
-        <bundle>wrap:mvn:com.sun.jersey/jersey-server/${jersey.version}</bundle>
+        <bundle>mvn:com.google.code.gson/gson/${gson.version}</bundle>
+        <bundle>mvn:com.sun.jersey/jersey-core/${jersey.version}</bundle>
+        <bundle>mvn:com.sun.jersey/jersey-server/${jersey.version}</bundle>
         <bundle>mvn:org.opendaylight.controller.thirdparty/com.sun.jersey.jersey-servlet/${jersey.version}</bundle>
-        <bundle>wrap:mvn:io.netty/netty-buffer/${netty.version}</bundle>
-        <bundle>wrap:mvn:io.netty/netty-codec/${netty.version}</bundle>
-        <bundle>wrap:mvn:io.netty/netty-codec-http/${netty.version}</bundle>
-        <bundle>wrap:mvn:io.netty/netty-common/${netty.version}</bundle>
-        <bundle>wrap:mvn:io.netty/netty-handler/${netty.version}</bundle>
-        <bundle>wrap:mvn:io.netty/netty-transport/${netty.version}</bundle>
-    </feature>
-    <feature name='odl-mdsal-model' version='${project.version}'>
-        <bundle>mvn:org.opendaylight.controller.model/model-flow-base/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller.model/model-flow-management/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller.model/model-flow-service/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller.model/model-flow-statistics/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller.model/model-inventory/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller.model/model-topology/${project.version}</bundle>
+        <bundle>mvn:io.netty/netty-buffer/${netty.version}</bundle>
+        <bundle>mvn:io.netty/netty-codec/${netty.version}</bundle>
+        <bundle>mvn:io.netty/netty-codec-http/${netty.version}</bundle>
+        <bundle>mvn:io.netty/netty-common/${netty.version}</bundle>
+        <bundle>mvn:io.netty/netty-handler/${netty.version}</bundle>
+        <bundle>mvn:io.netty/netty-transport/${netty.version}</bundle>
+        <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
+        <configfile finalname="${config.configfile.directory}/${config.restconf.configfile}">mvn:org.opendaylight.controller/sal-rest-connector-config/${mdsal.version}/xml/config</configfile>
     </feature>
-    <feature name='odl-mdsal-toaster' version='${project.version}'>
-        <feature version='${yangtools.version}'>yangtools-concepts</feature>
-        <feature version='${yangtools.version}'>yangtools-binding</feature>
+    <feature name='odl-toaster' version='${project.version}'>
+        <feature version='${yangtools.version}'>odl-yangtools-common</feature>
+        <feature version='${yangtools.version}'>odl-yangtools-binding</feature>
         <feature version='${project.version}'>odl-mdsal-broker</feature>
-        <feature version='${project.version}'>odl-mdsal-all</feature>
         <bundle>mvn:org.opendaylight.controller.samples/sample-toaster/${project.version}</bundle>
         <bundle>mvn:org.opendaylight.controller.samples/sample-toaster-consumer/${project.version}</bundle>
         <bundle>mvn:org.opendaylight.controller.samples/sample-toaster-provider/${project.version}</bundle>
+        <configfile finalname="${config.configfile.directory}/${config.toaster.configfile}">mvn:org.opendaylight.controller.samples/toaster-config/${project.version}/xml/config</configfile>
     </feature>
-    <feature name='odl-mdsal-misc' version='${project.version}'>
-        <bundle>mvn:org.opendaylight.controller/sal-netconf-connector/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller/sal-restconf-broker/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller/sal-remote/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller.md/topology-manager/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller.md/topology-lldp-discovery/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller.md/statistics-manager/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller.md/inventory-manager/${project.version}</bundle>
-        <bundle>mvn:org.opendaylight.controller.md/forwardingrules-manager/${project.version}</bundle>
-    </feature>
-
 </features>
index 856557c1e8245ef24ada6cdb63e9ad4c775b696e..956a67e28b3652d05ebb0547d5e3e456bc135dbd 100644 (file)
@@ -7,7 +7,7 @@
     <version>0.2.5-SNAPSHOT</version>
     <relativePath>../../opendaylight/netconf</relativePath>
   </parent>
-  <artifactId>netconf-features</artifactId>
+  <artifactId>features-netconf</artifactId>
 
   <packaging>pom</packaging>
 
   <dependencies>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>config-features</artifactId>
+      <artifactId>features-config</artifactId>
       <classifier>features</classifier>
       <type>xml</type>
       <scope>runtime</scope>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>features-odl-protocol-framework</artifactId>
+      <artifactId>features-protocol-framework</artifactId>
       <classifier>features</classifier>
       <type>xml</type>
       <scope>runtime</scope>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>ietf-netconf-monitoring</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>ietf-netconf-monitoring-extension</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools.model</groupId>
+      <artifactId>ietf-inet-types</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools.model</groupId>
+      <artifactId>ietf-yang-types</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-mapping-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>config-netconf-connector</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-netty-util</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller.thirdparty</groupId>
+      <artifactId>ganymed</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.openexi</groupId>
+      <artifactId>nagasena</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-codec</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-handler</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-buffer</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-transport</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-client</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-config</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>netconf-monitoring</artifactId>
+    </dependency>
   </dependencies>
 
   <build>
index 50a537b50ae2eee799e706e60dd8acfc6985a61d..0033b0d83c7e4bf02aea300b62cf0ee69be350ba 100644 (file)
@@ -3,14 +3,24 @@
 <features name="odl-netconf-${project.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
           xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
-  <repository>mvn:org.opendaylight.controller/features-odl-protocol-framework/${protocol-framework.version}/xml/features</repository>
-  <repository>mvn:org.opendaylight.controller/config-features/${config.version}/xml/features</repository>
+  <repository>mvn:org.opendaylight.controller/features-protocol-framework/${protocol-framework.version}/xml/features</repository>
+  <repository>mvn:org.opendaylight.controller/features-config/${config.version}/xml/features</repository>
+  <feature name='odl-netconf-all' version='${project.version}'>
+    <feature version='${project.version}'>odl-netconf-api</feature>
+    <feature version='${project.version}'>odl-netconf-mapping-api</feature>
+    <feature version='${project.version}'>odl-netconf-util</feature>
+    <feature version='${project.version}'>odl-netconf-impl</feature>
+    <feature version='${project.version}'>odl-config-netconf-connector</feature>
+    <feature version='${project.version}'>odl-netconf-netty-util</feature>
+    <feature version='${project.version}'>odl-netconf-client</feature>
+    <feature version='${project.version}'>odl-netconf-monitoring</feature>
+  </feature>
 
   <feature name='odl-netconf-api' version='${project.version}'>
+    <feature version='${protocol-framework.version}'>odl-protocol-framework</feature>
     <bundle>mvn:org.opendaylight.controller/netconf-api/${project.version}</bundle>
     <bundle>mvn:org.opendaylight.controller/ietf-netconf-monitoring/${project.version}</bundle>
     <bundle>mvn:org.opendaylight.controller/ietf-netconf-monitoring-extension/${project.version}</bundle>
-    <feature version='${protocol-framework.version}'>odl-protocol-framework</feature>
     <bundle>mvn:org.opendaylight.yangtools.model/ietf-inet-types/${ietf-inet-types.version}</bundle>
     <bundle>mvn:org.opendaylight.yangtools.model/ietf-yang-types/${ietf-yang-types.version}</bundle>
   </feature>
     <feature version='${project.version}'>odl-netconf-mapping-api</feature>
     <bundle>mvn:org.opendaylight.controller/netconf-util/${project.version}</bundle>
   </feature>
-  <feature name='odl-config-netconf-connector' version='${project.version}'>
-    <feature version='${config.version}'>odl-config-manager</feature>
-    <bundle>mvn:org.opendaylight.controller/config-netconf-connector/${project.version}</bundle>
+  <feature name='odl-netconf-impl' version='${project.version}'>
     <feature version='${project.version}'>odl-netconf-api</feature>
     <feature version='${project.version}'>odl-netconf-mapping-api</feature>
     <feature version='${project.version}'>odl-netconf-util</feature>
-  </feature>
-
-  <feature name='odl-netconf-impl' version='${project.version}'>
+    <feature version='${project.version}'>odl-netconf-netty-util</feature>
     <bundle>mvn:org.opendaylight.controller/netconf-impl/${project.version}</bundle>
+  </feature>
+  <feature name='odl-config-netconf-connector' version='${project.version}'>
+    <feature version='${config.version}'>odl-config-manager</feature>
     <feature version='${project.version}'>odl-netconf-api</feature>
     <feature version='${project.version}'>odl-netconf-mapping-api</feature>
     <feature version='${project.version}'>odl-netconf-util</feature>
-    <feature version='${project.version}'>odl-netconf-netty-util</feature>
+    <bundle>mvn:org.opendaylight.controller/config-netconf-connector/${project.version}</bundle>
   </feature>
   <feature name='odl-netconf-netty-util' version='${project.version}'>
-    <bundle>mvn:org.opendaylight.controller/netconf-netty-util/${project.version}</bundle>
     <feature version='${project.version}'>odl-netconf-api</feature>
     <feature version='${project.version}'>odl-netconf-mapping-api</feature>
     <feature version='${project.version}'>odl-netconf-util</feature>
+    <bundle>mvn:org.opendaylight.controller/netconf-netty-util/${project.version}</bundle>
     <bundle>mvn:org.opendaylight.controller.thirdparty/ganymed/${ganymed.version}</bundle>
     <bundle>mvn:org.openexi/nagasena/${exi.nagasena.version}</bundle>
     <bundle>mvn:io.netty/netty-codec/${netty.version}</bundle>
     <bundle>mvn:io.netty/netty-buffer/${netty.version}</bundle>
     <bundle>mvn:io.netty/netty-transport/${netty.version}</bundle>
   </feature>
-  <feature name='odl-netconf-misc' version='${project.version}'>
+  <feature name='odl-netconf-client' version="${project.version}">
+    <feature version='${project.version}'>odl-netconf-netty-util</feature>
     <bundle>mvn:org.opendaylight.controller/netconf-client/${project.version}</bundle>
+    <configfile finalname="${config.configfile.directory}/${config.netconf.client.configfile}">mvn:org.opendaylight.controller/netconf-config/${netconf.version}/xml/config</configfile>
+  </feature>
+  <feature name='odl-netconf-monitoring' version='${project.version}'>
+    <feature version='${project.version}'>odl-netconf-util</feature>
     <bundle>mvn:org.opendaylight.controller/netconf-monitoring/${project.version}</bundle>
-    <bundle>mvn:org.opendaylight.controller/netconf-tcp/${project.version}</bundle>
   </feature>
 
 </features>
\ No newline at end of file
index f69190cebd4e5bd45fc0d11fc674305f65a0589c..88ed7491a75d922646dcdfcf6f43cad77392be00 100644 (file)
@@ -22,6 +22,7 @@
     <module>config-persister</module>
     <module>config-netty</module>
     <module>mdsal</module>
+    <module>flow</module>
     <module>netconf</module>
     <module>protocol-framework</module>
   </modules>
index ba5dd18fc21615f3e52ddcbbe6f91c2323cc33d5..97836be4552f88e4ad960981ebea186e664a090f 100644 (file)
@@ -7,7 +7,7 @@
     <version>1.4.2-SNAPSHOT</version>
     <relativePath>../../opendaylight/commons/opendaylight</relativePath>
   </parent>
-  <artifactId>features-odl-protocol-framework</artifactId>
+  <artifactId>features-protocol-framework</artifactId>
   <version>${protocol-framework.version}</version>
   <packaging>pom</packaging>
 
   <dependencies>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>config-features</artifactId>
+      <artifactId>features-config</artifactId>
       <classifier>features</classifier>
       <type>xml</type>
       <scope>runtime</scope>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>protocol-framework</artifactId>
+    </dependency>
   </dependencies>
 
   <build>
index d2560f5cb046f3d70a905c37a6ed520ca3854552..6daa3432c14285d8746bcd7385b08e520213f330 100644 (file)
@@ -3,10 +3,10 @@
 <features name="odl-protocol-framework-${protocol-framework.version}" xmlns="http://karaf.apache.org/xmlns/features/v1.2.0"
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
           xsi:schemaLocation="http://karaf.apache.org/xmlns/features/v1.2.0 http://karaf.apache.org/xmlns/features/v1.2.0">
-  <repository>mvn:org.opendaylight.controller/config-features/${config.version}/xml/features</repository>
+  <repository>mvn:org.opendaylight.controller/features-config/${config.version}/xml/features</repository>
   <feature name='odl-protocol-framework' version='${project.version}'>
+    <feature version='${config.version}'>odl-config-api</feature>
+    <feature version='${config.version}'>odl-config-netty-config-api</feature>
     <bundle>mvn:org.opendaylight.controller/protocol-framework/${protocol-framework.version}</bundle>
-    <feature version='${config.version}'>odl-config-api</feature> <!-- needed by netty-config-api -->
-    <feature version='${config.version}'>odl-config-netty-config-api</feature> <!-- needed by netty-config-api -->
   </feature>
 </features>
\ No newline at end of file
diff --git a/opendaylight/commons/liblldp/pom.xml b/opendaylight/commons/liblldp/pom.xml
new file mode 100644 (file)
index 0000000..1551041
--- /dev/null
@@ -0,0 +1,55 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.opendaylight.controller</groupId>
+    <artifactId>commons.opendaylight</artifactId>
+    <version>1.4.2-SNAPSHOT</version>
+    <relativePath>../opendaylight</relativePath>
+  </parent>
+
+  <artifactId>liblldp</artifactId>
+  <version>0.8.1-SNAPSHOT</version>
+  <packaging>bundle</packaging>
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.felix</groupId>
+        <artifactId>maven-bundle-plugin</artifactId>
+        <extensions>true</extensions>
+        <configuration>
+          <instructions>
+            <Import-Package>org.slf4j,
+              org.apache.commons.lang3.builder,
+              org.apache.commons.lang3.tuple
+            </Import-Package>
+            <Export-Package>
+              org.opendaylight.controller.liblldp</Export-Package>
+          </instructions>
+          <manifestLocation>${project.basedir}/META-INF</manifestLocation>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+  <scm>
+    <connection>scm:git:ssh://git.opendaylight.org:29418/controller.git</connection>
+    <developerConnection>scm:git:ssh://git.opendaylight.org:29418/controller.git</developerConnection>
+    <tag>HEAD</tag>
+    <url>https://wiki.opendaylight.org/view/OpenDaylight_Controller:Main</url>
+  </scm>
+</project>
diff --git a/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/BitBufferHelper.java b/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/BitBufferHelper.java
new file mode 100644 (file)
index 0000000..3eae432
--- /dev/null
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+/**
+ *
+ */
+package org.opendaylight.controller.liblldp;
+
+import java.util.Arrays;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * BitBufferHelper class that provides utility methods to
+ * - fetch specific bits from a serialized stream of bits
+ * - convert bits to primitive data type - like short, int, long
+ * - store bits in specified location in stream of bits
+ * - convert primitive data types to stream of bits
+ */
+public abstract class BitBufferHelper {
+    protected static final Logger logger = LoggerFactory
+    .getLogger(BitBufferHelper.class);
+
+    public static final long ByteMask = 0xFF;
+
+    // Getters
+    // data: array where data are stored
+    // startOffset: bit from where to start reading
+    // numBits: number of bits to read
+    // All these functions throw an exception on overflow or underflow
+
+    /**
+     * Returns the first byte from the byte array
+     * @param byte[] data
+     * @return byte value
+     */
+    public static byte getByte(byte[] data) {
+        if ((data.length * NetUtils.NumBitsInAByte) > Byte.SIZE) {
+            try {
+                throw new BufferException(
+                        "Container is too small for the number of requested bits");
+            } catch (BufferException e) {
+                logger.error("", e);
+            }
+        }
+        return (data[0]);
+    }
+
+    /**
+     * Returns the short value for the byte array passed.
+     * Size of byte array is restricted to Short.SIZE
+     * @param byte[] data
+     * @return short value
+     */
+    public static short getShort(byte[] data) {
+        if (data.length > Short.SIZE) {
+            try {
+                throw new BufferException(
+                        "Container is too small for the number of requested bits");
+            } catch (BufferException e) {
+                logger.error("", e);
+            }
+        }
+        return (short) toNumber(data);
+    }
+
+    /**
+     * Returns the int value for the byte array passed.
+     * Size of byte array is restricted to Integer.SIZE
+     * @param byte[] data
+     * @return int - the integer value of byte array
+     */
+    public static int getInt(byte[] data) {
+        if (data.length > Integer.SIZE) {
+            try {
+                throw new BufferException(
+                        "Container is too small for the number of requested bits");
+            } catch (BufferException e) {
+                logger.error("", e);
+            }
+        }
+        return (int) toNumber(data);
+    }
+
+    /**
+     * Returns the long value for the byte array passed.
+     * Size of byte array is restricted to Long.SIZE
+     * @param byte[] data
+     * @return long - the integer value of byte array
+     */
+    public static long getLong(byte[] data) {
+        if (data.length > Long.SIZE) {
+            try {
+                throw new BufferException(
+                        "Container is too small for the number of requested bits");
+            } catch (BufferException e) {
+                logger.error("", e);
+            }
+        }
+        return (long) toNumber(data);
+    }
+
+    /**
+     * Returns the short value for the last numBits of the byte array passed.
+     * Size of numBits is restricted to Short.SIZE
+     * @param byte[] data
+     * @param int - numBits
+     * @return short - the short value of byte array
+     */
+    public static short getShort(byte[] data, int numBits) {
+        if (numBits > Short.SIZE) {
+            try {
+                throw new BufferException(
+                        "Container is too small for the number of requested bits");
+            } catch (BufferException e) {
+                logger.error("", e);
+            }
+        }
+        int startOffset = data.length * NetUtils.NumBitsInAByte - numBits;
+        byte[] bits = null;
+        try {
+            bits = BitBufferHelper.getBits(data, startOffset, numBits);
+        } catch (BufferException e) {
+            logger.error("", e);
+        }
+        return (short) toNumber(bits, numBits);
+    }
+
+    /**
+     * Returns the int value for the last numBits of the byte array passed.
+     * Size of numBits is restricted to Integer.SIZE
+     * @param byte[] data
+     * @param int - numBits
+     * @return int - the integer value of byte array
+     */
+    public static int getInt(byte[] data, int numBits) {
+        if (numBits > Integer.SIZE) {
+            try {
+                throw new BufferException(
+                        "Container is too small for the number of requested bits");
+            } catch (BufferException e) {
+                logger.error("", e);
+            }
+        }
+        int startOffset = data.length * NetUtils.NumBitsInAByte - numBits;
+        byte[] bits = null;
+        try {
+            bits = BitBufferHelper.getBits(data, startOffset, numBits);
+        } catch (BufferException e) {
+            logger.error("", e);
+        }
+        return (int) toNumber(bits, numBits);
+    }
+
+    /**
+     * Returns the long value for the last numBits of the byte array passed.
+     * Size of numBits is restricted to Long.SIZE
+     * @param byte[] data
+     * @param int - numBits
+     * @return long - the integer value of byte array
+     */
+    public static long getLong(byte[] data, int numBits) {
+        if (numBits > Long.SIZE) {
+            try {
+                throw new BufferException(
+                        "Container is too small for the number of requested bits");
+            } catch (BufferException e) {
+                logger.error("", e);
+            }
+        }
+        if (numBits > data.length * NetUtils.NumBitsInAByte) {
+            try {
+                throw new BufferException(
+                        "Trying to read more bits than contained in the data buffer");
+            } catch (BufferException e) {
+                logger.error("", e);
+            }
+        }
+        int startOffset = data.length * NetUtils.NumBitsInAByte - numBits;
+        byte[] bits = null;
+        try {
+            bits = BitBufferHelper.getBits(data, startOffset, numBits);
+        } catch (BufferException e) {
+            logger.error("", e);
+        }
+        return (long) toNumber(bits, numBits);
+    }
+
+    /**
+     * Reads the specified number of bits from the passed byte array
+     * starting to read from the specified offset
+     * The bits read are stored in a byte array which size is dictated
+     * by the number of bits to be stored.
+     * The bits are stored in the byte array LSB aligned.
+     *
+     * Ex.
+     * Read 7 bits at offset 10
+     * 0         9 10     16 17
+     * 0101000010 | 0000101 | 1111001010010101011
+     * will be returned as {0,0,0,0,0,1,0,1}
+     *
+     * @param byte[] data
+     * @param int startOffset - offset to start fetching bits from data from
+     * @param int numBits - number of bits to be fetched from data
+     * @return byte [] - LSB aligned bits
+     *
+     * @throws BufferException
+     *             when the startOffset and numBits parameters are not congruent
+     *             with the data buffer size
+     */
+    public static byte[] getBits(byte[] data, int startOffset, int numBits)
+            throws BufferException {
+
+        int startByteOffset = 0;
+        int valfromcurr, valfromnext;
+        int extranumBits = numBits % NetUtils.NumBitsInAByte;
+        int extraOffsetBits = startOffset % NetUtils.NumBitsInAByte;
+        int numBytes = (numBits % NetUtils.NumBitsInAByte != 0) ? 1 + numBits
+                / NetUtils.NumBitsInAByte : numBits / NetUtils.NumBitsInAByte;
+        byte[] shiftedBytes = new byte[numBytes];
+        startByteOffset = startOffset / NetUtils.NumBitsInAByte;
+        byte[] bytes = new byte[numBytes];
+        if (numBits == 0) {
+            return bytes;
+        }
+
+        checkExceptions(data, startOffset, numBits);
+
+        if (extraOffsetBits == 0) {
+            if (extranumBits == 0) {
+                System.arraycopy(data, startByteOffset, bytes, 0, numBytes);
+                return bytes;
+            } else {
+                System.arraycopy(data, startByteOffset, bytes, 0, numBytes - 1);
+                bytes[numBytes - 1] = (byte) ((int) data[startByteOffset
+                        + numBytes - 1] & getMSBMask(extranumBits));
+            }
+        } else {
+            int i;
+            for (i = 0; i < numBits / NetUtils.NumBitsInAByte; i++) {
+                // Reading numBytes starting from offset
+                valfromcurr = (data[startByteOffset + i])
+                        & getLSBMask(NetUtils.NumBitsInAByte - extraOffsetBits);
+                valfromnext = (data[startByteOffset + i + 1])
+                        & getMSBMask(extraOffsetBits);
+                bytes[i] = (byte) (valfromcurr << (extraOffsetBits) | (valfromnext >> (NetUtils.NumBitsInAByte - extraOffsetBits)));
+            }
+            // Now adding the rest of the bits if any
+            if (extranumBits != 0) {
+                if (extranumBits < (NetUtils.NumBitsInAByte - extraOffsetBits)) {
+                    valfromnext = (byte) (data[startByteOffset + i] & ((getMSBMask(extranumBits)) >> extraOffsetBits));
+                    bytes[i] = (byte) (valfromnext << extraOffsetBits);
+                } else if (extranumBits == (NetUtils.NumBitsInAByte - extraOffsetBits)) {
+                    valfromcurr = (data[startByteOffset + i])
+                            & getLSBMask(NetUtils.NumBitsInAByte
+                                    - extraOffsetBits);
+                    bytes[i] = (byte) (valfromcurr << extraOffsetBits);
+                } else {
+                    valfromcurr = (data[startByteOffset + i])
+                            & getLSBMask(NetUtils.NumBitsInAByte
+                                    - extraOffsetBits);
+                    valfromnext = (data[startByteOffset + i + 1])
+                            & (getMSBMask(extranumBits
+                                    - (NetUtils.NumBitsInAByte - extraOffsetBits)));
+                    bytes[i] = (byte) (valfromcurr << (extraOffsetBits) | (valfromnext >> (NetUtils.NumBitsInAByte - extraOffsetBits)));
+                }
+
+            }
+        }
+        // Aligns the bits to LSB
+        shiftedBytes = shiftBitsToLSB(bytes, numBits);
+        return shiftedBytes;
+    }
+
+    // Setters
+    // data: array where data will be stored
+    // input: the data that need to be stored in the data array
+    // startOffset: bit from where to start writing
+    // numBits: number of bits to read
+
+    /**
+     * Bits are expected to be stored in the input byte array from LSB
+     * @param byte[] - data to set the input byte
+     * @param byte - input byte to be inserted
+     * @param startOffset - offset of data[] to start inserting byte from
+     * @param numBits - number of bits of input to be inserted into data[]
+     *
+     * @throws BufferException
+     *             when the input, startOffset and numBits are not congruent
+     *             with the data buffer size
+     */
+    public static void setByte(byte[] data, byte input, int startOffset,
+            int numBits) throws BufferException {
+        byte[] inputByteArray = new byte[1];
+        Arrays.fill(inputByteArray, 0, 1, input);
+        setBytes(data, inputByteArray, startOffset, numBits);
+    }
+
+    /**
+     * Bits are expected to be stored in the input byte array from LSB
+     * @param byte[] - data to set the input byte
+     * @param byte[] - input bytes to be inserted
+     * @param startOffset - offset of data[] to start inserting byte from
+     * @param numBits - number of bits of input to be inserted into data[]
+     * @return void
+     * @throws BufferException
+     *             when the startOffset and numBits parameters are not congruent
+     *             with data and input buffers' size
+     */
+    public static void setBytes(byte[] data, byte[] input, int startOffset,
+            int numBits) throws BufferException {
+        checkExceptions(data, startOffset, numBits);
+        insertBits(data, input, startOffset, numBits);
+    }
+
+    /**
+     * Returns numBits 1's in the MSB position
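+     * Example: getMSBMask(3) returns 0xE0 (the three most significant bits set).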
+     *
+     * @param numBits
+     * @return
+     */
+    public static int getMSBMask(int numBits) {
+        int mask = 0;
+        for (int i = 0; i < numBits; i++) {
+            mask = mask | (1 << (7 - i));
+        }
+        return mask;
+    }
+
+    /**
+     * Returns numBits 1's in the LSB position
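+     * Example: getLSBMask(3) returns 0x07 (the three least significant bits set).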
+     *
+     * @param numBits
+     * @return
+     */
+    public static int getLSBMask(int numBits) {
+        int mask = 0;
+        for (int i = 0; i < numBits; i++) {
+            mask = mask | (1 << i);
+        }
+        return mask;
+    }
+
+    /**
+     * Returns the numerical value of the byte array passed
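+     * Example: toNumber(new byte[] { 0x01, 0x02 }) returns 258.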
+     *
+     * @param byte[] - array
+     * @return long - numerical value of byte array passed
+     */
+    static public long toNumber(byte[] array) {
+        long ret = 0;
+        long length = array.length;
+        int value = 0;
+        for (int i = 0; i < length; i++) {
+            value = array[i];
+            if (value < 0)
+                value += 256;
+            ret = ret
+                    | (long) ((long) value << ((length - i - 1) * NetUtils.NumBitsInAByte));
+        }
+        return ret;
+    }
+
+    /**
+     * Returns the numerical value of the last numBits (LSB bits) of the byte
+     * array passed
+     *
+     * @param byte[] - array
+     * @param int - numBits
+     * @return long - numerical value of byte array passed
+     */
+    static public long toNumber(byte[] array, int numBits) {
+        int length = numBits / NetUtils.NumBitsInAByte;
+        int bitsRest = numBits % NetUtils.NumBitsInAByte;
+        int startOffset = array.length - length;
+        long ret = 0;
+        int value = 0;
+
+        value = array[startOffset - 1] & getLSBMask(bitsRest);
+        value = (array[startOffset - 1] < 0) ? (array[startOffset - 1] + 256)
+                : array[startOffset - 1];
+        ret = ret
+                | (value << ((array.length - startOffset) * NetUtils.NumBitsInAByte));
+
+        for (int i = startOffset; i < array.length; i++) {
+            value = array[i];
+            if (value < 0)
+                value += 256;
+            ret = ret
+                    | (long) ((long) value << ((array.length - i - 1) * NetUtils.NumBitsInAByte));
+        }
+
+        return ret;
+    }
+
+    /**
+     * Accepts a number as input and returns its value in byte form in LSB
+     * aligned form example: input = 5000 [1001110001000] bytes = 19, -120
+     * [00010011] [10001000]
+     *
+     * @param Number
+     * @return byte[]
+     *
+     */
+
+    public static byte[] toByteArray(Number input) {
+        Class<? extends Number> dataType = input.getClass();
+        short size = 0;
+        long longValue = input.longValue();
+
+        if (dataType == Byte.class || dataType == byte.class) {
+            size = Byte.SIZE;
+        } else if (dataType == Short.class || dataType == short.class) {
+            size = Short.SIZE;
+        } else if (dataType == Integer.class || dataType == int.class) {
+            size = Integer.SIZE;
+        } else if (dataType == Long.class || dataType == long.class) {
+            size = Long.SIZE;
+        } else {
+            throw new IllegalArgumentException(
+                    "Parameter must be one of the following: Byte/Short/Int/Long\n");
+        }
+
+        int length = size / NetUtils.NumBitsInAByte;
+        byte bytes[] = new byte[length];
+
+        // Getting the bytes from input value
+        for (int i = 0; i < length; i++) {
+            bytes[i] = (byte) ((longValue >> (NetUtils.NumBitsInAByte * (length
+                    - i - 1))) & ByteMask);
+        }
+        return bytes;
+    }
+
+    /**
+     * Accepts a number as input and returns its value in byte form in MSB
+     * aligned form example: input = 5000 [1001110001000] bytes = -114, 64
+     * [10011100] [01000000]
+     *
+     * @param Number
+     *            input
+     * @param int numBits - the number of bits to be returned
+     * @return byte[]
+     *
+     */
+    public static byte[] toByteArray(Number input, int numBits) {
+        Class<? extends Number> dataType = input.getClass();
+        short size = 0;
+        long longValue = input.longValue();
+
+        if (dataType == Short.class) {
+            size = Short.SIZE;
+        } else if (dataType == Integer.class) {
+            size = Integer.SIZE;
+        } else if (dataType == Long.class) {
+            size = Long.SIZE;
+        } else {
+            throw new IllegalArgumentException(
+                    "Parameter must be one of the following: Short/Int/Long\n");
+        }
+
+        int length = size / NetUtils.NumBitsInAByte;
+        byte bytes[] = new byte[length];
+        byte[] inputbytes = new byte[length];
+        byte shiftedBytes[];
+
+        // Getting the bytes from input value
+        for (int i = 0; i < length; i++) {
+            bytes[i] = (byte) ((longValue >> (NetUtils.NumBitsInAByte * (length
+                    - i - 1))) & ByteMask);
+        }
+
+        if ((bytes[0] == 0 && dataType == Long.class)
+                || (bytes[0] == 0 && dataType == Integer.class)) {
+            int index = 0;
+            for (index = 0; index < length; ++index) {
+                if (bytes[index] != 0) {
+                    bytes[0] = bytes[index];
+                    break;
+                }
+            }
+            System.arraycopy(bytes, index, inputbytes, 0, length - index);
+            Arrays.fill(bytes, length - index + 1, length - 1, (byte) 0);
+        } else {
+            System.arraycopy(bytes, 0, inputbytes, 0, length);
+        }
+
+        shiftedBytes = shiftBitsToMSB(inputbytes, numBits);
+
+        return shiftedBytes;
+    }
+
+    /**
+     * Takes an LSB-aligned byte array and returns the LSB numBits in an
+     * MSB-aligned byte array.
+     *
+     * It aligns the last numBits bits to the head of the byte array following
+     * them with numBits % 8 zero bits.
+     *
+     * Example: For inputbytes = [00000111][01110001] and numBits = 12 it
+     * returns: shiftedBytes = [01110111][00010000]
+     *
+     * @param byte[] inputBytes
+     * @param int numBits - number of bits to be left aligned
+     * @return byte[]
+     */
+    public static byte[] shiftBitsToMSB(byte[] inputBytes, int numBits) {
+        int numBitstoShiftBy = 0, leadZeroesMSB = 8, numEndRestBits = 0;
+        int size = inputBytes.length;
+        byte[] shiftedBytes = new byte[size];
+        int i;
+
+        for (i = 0; i < Byte.SIZE; i++) {
+            if (((byte) (inputBytes[0] & getMSBMask(i + 1))) != 0) {
+                leadZeroesMSB = i;
+                break;
+            }
+        }
+
+        if (numBits % NetUtils.NumBitsInAByte == 0) {
+            numBitstoShiftBy = 0;
+        } else {
+            numBitstoShiftBy = ((NetUtils.NumBitsInAByte - (numBits % NetUtils.NumBitsInAByte)) < leadZeroesMSB) ? (NetUtils.NumBitsInAByte - (numBits % NetUtils.NumBitsInAByte))
+                    : leadZeroesMSB;
+        }
+        if (numBitstoShiftBy == 0) {
+            return inputBytes;
+        }
+
+        if (numBits < NetUtils.NumBitsInAByte) {
+            // inputbytes.length = 1 OR read less than a byte
+            shiftedBytes[0] = (byte) ((inputBytes[0] & getLSBMask(numBits)) << numBitstoShiftBy);
+        } else {
+            // # of bits to read from last byte
+            numEndRestBits = NetUtils.NumBitsInAByte
+                    - (inputBytes.length * NetUtils.NumBitsInAByte - numBits - numBitstoShiftBy);
+
+            for (i = 0; i < (size - 1); i++) {
+                if ((i + 1) == (size - 1)) {
+                    if (numEndRestBits > numBitstoShiftBy) {
+                        shiftedBytes[i] = (byte) ((inputBytes[i] << numBitstoShiftBy) | ((inputBytes[i + 1] & getMSBMask(numBitstoShiftBy)) >> (numEndRestBits - numBitstoShiftBy)));
+                        shiftedBytes[i + 1] = (byte) ((inputBytes[i + 1] & getLSBMask(numEndRestBits
+                                - numBitstoShiftBy)) << numBitstoShiftBy);
+                    } else
+                        shiftedBytes[i] = (byte) ((inputBytes[i] << numBitstoShiftBy) | ((inputBytes[i + 1] & getMSBMask(numEndRestBits)) >> (NetUtils.NumBitsInAByte - numEndRestBits)));
+                }
+                shiftedBytes[i] = (byte) ((inputBytes[i] << numBitstoShiftBy) | (inputBytes[i + 1] & getMSBMask(numBitstoShiftBy)) >> (NetUtils.NumBitsInAByte - numBitstoShiftBy));
+            }
+
+        }
+        return shiftedBytes;
+    }
+
+    /**
+     * It aligns the first numBits bits to the right end of the byte array
+     * preceding them with numBits % 8 zero bits.
+     *
+     * Example: For inputbytes = [01110111][00010000] and numBits = 12 it
+     * returns: shiftedBytes = [00000111][01110001]
+     *
+     * @param byte[] inputBytes
+     * @param int numBits - number of bits to be right aligned
+     * @return byte[]
+     */
+    public static byte[] shiftBitsToLSB(byte[] inputBytes, int numBits) {
+        int numBytes = inputBytes.length;
+        int numBitstoShift = numBits % NetUtils.NumBitsInAByte;
+        byte[] shiftedBytes = new byte[numBytes];
+        int inputLsb = 0, inputMsb = 0;
+
+        if (numBitstoShift == 0) {
+            return inputBytes;
+        }
+
+        for (int i = 1; i < numBytes; i++) {
+            inputLsb = inputBytes[i - 1]
+                    & getLSBMask(NetUtils.NumBitsInAByte - numBitstoShift);
+            inputLsb = (inputLsb < 0) ? (inputLsb + 256) : inputLsb;
+            inputMsb = inputBytes[i] & getMSBMask(numBitstoShift);
+            inputMsb = (inputBytes[i] < 0) ? (inputBytes[i] + 256)
+                    : inputBytes[i];
+            shiftedBytes[i] = (byte) ((inputLsb << numBitstoShift) | (inputMsb >> (NetUtils.NumBitsInAByte - numBitstoShift)));
+        }
+        inputMsb = inputBytes[0] & (getMSBMask(numBitstoShift));
+        inputMsb = (inputMsb < 0) ? (inputMsb + 256) : inputMsb;
+        shiftedBytes[0] = (byte) (inputMsb >> (NetUtils.NumBitsInAByte - numBitstoShift));
+        return shiftedBytes;
+    }
+
+    /**
+     * Insert in the data buffer at position dictated by the offset the number
+     * of bits specified from the input data byte array. The input byte array
+     * has the bits stored starting from the LSB
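+     *
+     * Example: on a zeroed two-byte array, insertBits(data, new byte[] { 0x05 }, 4, 4)
+     * leaves data = { 0x05, 0x00 }.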
+     *
+     * @param byte[] data
+     * @param byte[] inputdata
+     * @param int startOffset
+     * @param int numBits
+     */
+    public static void insertBits(byte[] data, byte[] inputdataLSB,
+            int startOffset, int numBits) {
+        byte[] inputdata = shiftBitsToMSB(inputdataLSB, numBits); // Align to
+                                                                  // MSB the
+                                                                  // passed byte
+                                                                  // array
+        int numBytes = numBits / NetUtils.NumBitsInAByte;
+        int startByteOffset = startOffset / NetUtils.NumBitsInAByte;
+        int extraOffsetBits = startOffset % NetUtils.NumBitsInAByte;
+        int extranumBits = numBits % NetUtils.NumBitsInAByte;
+        int RestBits = numBits % NetUtils.NumBitsInAByte;
+        int InputMSBbits = 0, InputLSBbits = 0;
+        int i;
+
+        if (numBits == 0) {
+            return;
+        }
+
+        if (extraOffsetBits == 0) {
+            if (extranumBits == 0) {
+                numBytes = numBits / NetUtils.NumBitsInAByte;
+                System.arraycopy(inputdata, 0, data, startByteOffset, numBytes);
+            } else {
+                System.arraycopy(inputdata, 0, data, startByteOffset, numBytes);
+                data[startByteOffset + numBytes] = (byte) (data[startByteOffset
+                        + numBytes] | (inputdata[numBytes] & getMSBMask(extranumBits)));
+            }
+        } else {
+            for (i = 0; i < numBytes; i++) {
+                if (i != 0)
+                    InputLSBbits = (inputdata[i - 1] & getLSBMask(extraOffsetBits));
+                InputMSBbits = (byte) (inputdata[i] & (getMSBMask(NetUtils.NumBitsInAByte
+                        - extraOffsetBits)));
+                InputMSBbits = (InputMSBbits >= 0) ? InputMSBbits
+                        : InputMSBbits + 256;
+                data[startByteOffset + i] = (byte) (data[startByteOffset + i]
+                        | (InputLSBbits << (NetUtils.NumBitsInAByte - extraOffsetBits)) | (InputMSBbits >> extraOffsetBits));
+                InputMSBbits = InputLSBbits = 0;
+            }
+            if (RestBits < (NetUtils.NumBitsInAByte - extraOffsetBits)) {
+                if (numBytes != 0)
+                    InputLSBbits = (inputdata[i - 1] & getLSBMask(extraOffsetBits));
+                InputMSBbits = (byte) (inputdata[i] & (getMSBMask(RestBits)));
+                InputMSBbits = (InputMSBbits >= 0) ? InputMSBbits
+                        : InputMSBbits + 256;
+                data[startByteOffset + i] = (byte) ((data[startByteOffset + i])
+                        | (InputLSBbits << (NetUtils.NumBitsInAByte - extraOffsetBits)) | (InputMSBbits >> extraOffsetBits));
+            } else if (RestBits == (NetUtils.NumBitsInAByte - extraOffsetBits)) {
+                if (numBytes != 0)
+                    InputLSBbits = (inputdata[i - 1] & getLSBMask(extraOffsetBits));
+                InputMSBbits = (byte) (inputdata[i] & (getMSBMask(NetUtils.NumBitsInAByte
+                        - extraOffsetBits)));
+                InputMSBbits = (InputMSBbits >= 0) ? InputMSBbits
+                        : InputMSBbits + 256;
+                data[startByteOffset + i] = (byte) (data[startByteOffset + i]
+                        | (InputLSBbits << (NetUtils.NumBitsInAByte - extraOffsetBits)) | (InputMSBbits >> extraOffsetBits));
+            } else {
+                if (numBytes != 0)
+                    InputLSBbits = (inputdata[i - 1] & getLSBMask(extraOffsetBits));
+                InputMSBbits = (byte) (inputdata[i] & (getMSBMask(NetUtils.NumBitsInAByte
+                        - extraOffsetBits)));
+                InputMSBbits = (InputMSBbits >= 0) ? InputMSBbits
+                        : InputMSBbits + 256;
+                data[startByteOffset + i] = (byte) (data[startByteOffset + i]
+                        | (InputLSBbits << (NetUtils.NumBitsInAByte - extraOffsetBits)) | (InputMSBbits >> extraOffsetBits));
+
+                InputLSBbits = (inputdata[i] & (getLSBMask(RestBits
+                        - (NetUtils.NumBitsInAByte - extraOffsetBits)) << (NetUtils.NumBitsInAByte - RestBits)));
+                data[startByteOffset + i + 1] = (byte) (data[startByteOffset
+                        + i + 1] | (InputLSBbits << (NetUtils.NumBitsInAByte - extraOffsetBits)));
+            }
+        }
+    }
+
+    /**
+     * Checks for buffer overflow and underflow conditions
+     * @param data the data buffer to validate against
+     * @param startOffset the offset, in bits, at which the access starts
+     * @param numBits the number of bits to be read or written
+     * @throws BufferException when the startOffset and numBits parameters
+     *                    are not congruent with the data buffer's size
+     */
+    public static void checkExceptions(byte[] data, int startOffset, int numBits)
+            throws BufferException {
+        int endOffsetByte;
+        int startByteOffset;
+        endOffsetByte = startOffset
+                / NetUtils.NumBitsInAByte
+                + numBits
+                / NetUtils.NumBitsInAByte
+                + ((numBits % NetUtils.NumBitsInAByte != 0) ? 1 : ((startOffset
+                        % NetUtils.NumBitsInAByte != 0) ? 1 : 0));
+        startByteOffset = startOffset / NetUtils.NumBitsInAByte;
+
+        if (data == null) {
+            throw new BufferException("data[] is null\n");
+        }
+
+        if ((startOffset < 0) || (startByteOffset >= data.length)
+                || (endOffsetByte > data.length) || (numBits < 0)
+                || (numBits > NetUtils.NumBitsInAByte * data.length)) {
+            throw new BufferException(
+                    "Illegal argument/out of bounds exception - data.length = "
+                            + data.length + " startOffset = " + startOffset
+                            + " numBits " + numBits);
+        }
+    }
+}
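Reviewer note: setBytes/checkExceptions form the write-side entry point used by LLDP.serialize(). A minimal usage sketch follows (illustrative only, not part of the patch; the buffer sizes and values are arbitrary, and byte-aligned offsets are assumed, as in the LLDP code path):

    // Hypothetical snippet: append two one-byte fields to a buffer at
    // byte-aligned bit offsets, the way LLDP.serialize() packs consecutive TLVs.
    byte[] buffer = new byte[2];
    byte[] first = { (byte) 0xAB };
    byte[] second = { (byte) 0xCD };
    try {
        BitBufferHelper.checkExceptions(buffer, 0, 8);  // explicit bounds check
        BitBufferHelper.setBytes(buffer, first, 0, 8);  // bits 0..7  -> buffer[0] == 0xAB
        BitBufferHelper.setBytes(buffer, second, 8, 8); // bits 8..15 -> buffer[1] == 0xCD
    } catch (BufferException e) {
        // raised when the offset/length combination does not fit the buffer
    }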
diff --git a/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/BufferException.java b/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/BufferException.java
new file mode 100644 (file)
index 0000000..fa0848d
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.liblldp;
+
+/**
+ * Describes an exception that is raised during BitBufferHelper operations.
+ */
+public class BufferException extends Exception {
+    private static final long serialVersionUID = 1L;
+
+    public BufferException(String message) {
+        super(message);
+    }
+}
diff --git a/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/ConstructionException.java b/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/ConstructionException.java
new file mode 100644 (file)
index 0000000..8b1d9d2
--- /dev/null
@@ -0,0 +1,28 @@
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+/**
+ * @file   ConstructionException.java
+ *
+ *
+ * @brief  Describes an exception that is raised when the construction
+ * of a Node/NodeConnector/Edge or any other SAL basic object fails
+ * because the input passed is not valid or compatible
+ *
+ *
+ */
+package org.opendaylight.controller.liblldp;
+
+public class ConstructionException extends Exception {
+    private static final long serialVersionUID = 1L;
+
+    public ConstructionException(String message) {
+        super(message);
+    }
+}
diff --git a/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/DataLinkAddress.java b/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/DataLinkAddress.java
new file mode 100644 (file)
index 0000000..d617c05
--- /dev/null
@@ -0,0 +1,96 @@
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.io.Serializable;
+
+import javax.xml.bind.annotation.XmlRootElement;
+
+/**
+ * @file   DataLinkAddress.java
+ *
+ * @brief  Abstract base class for a Datalink Address
+ *
+ */
+
+/**
+ * Abstract base class for a Datalink Address
+ *
+ */
+@XmlRootElement
+abstract public class DataLinkAddress implements Serializable {
+    private static final long serialVersionUID = 1L;
+    private String name;
+
+    public DataLinkAddress() {
+
+    }
+
+    /**
+     * Constructor of the super class
+     *
+     * @param name the name of the DataLinkAddress being created; not for
+     * general use, available only to subclasses
+     */
+    protected DataLinkAddress(String name) {
+        this.name = name;
+    }
+
+    /**
+     * Used to copy the DataLinkAddress in a polymorphic way
+     *
+     *
+     * @return A clone of this DataLinkAddress
+     */
+    @Override
+    abstract public DataLinkAddress clone();
+
+    /**
+     * Allows distinguishing among different data link addresses
+     *
+     * @return Name of the DataLinkAddress we are working on
+     */
+    public String getName() {
+        return this.name;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((name == null) ? 0 : name.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        DataLinkAddress other = (DataLinkAddress) obj;
+        if (name == null) {
+            if (other.name != null)
+                return false;
+        } else if (!name.equals(other.name))
+            return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return "DataLinkAddress [name=" + name + "]";
+    }
+}
diff --git a/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/EtherTypes.java b/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/EtherTypes.java
new file mode 100644 (file)
index 0000000..876d495
--- /dev/null
@@ -0,0 +1,117 @@
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * The enum contains the most common 802.3 Ethernet types and 802.2 + SNAP protocol ids
+ */
+public enum EtherTypes {
+    PVSTP("PVSTP", 0x010B), // 802.2 + SNAP (Spanning Tree)
+    CDP("CDP", 0x2000), // 802.2 + SNAP
+    VTP("VTP", 0x2003), // 802.2 + SNAP
+    IPv4("IPv4", 0x800), ARP("ARP", 0x806), RARP("Reverse ARP", 0x8035), VLANTAGGED(
+            "VLAN Tagged", 0x8100), // 802.1Q
+    IPv6("IPv6", 0x86DD), MPLSUCAST("MPLS Unicast", 0x8847), MPLSMCAST(
+            "MPLS Multicast", 0x8848), QINQ("QINQ", 0x88A8), // Standard 802.1ad QinQ
+    LLDP("LLDP", 0x88CC), OLDQINQ("Old QINQ", 0x9100), // Old non-standard QinQ
+    CISCOQINQ("Cisco QINQ", 0x9200); // Cisco non-standard QinQ
+
+    private static final String regexNumberString = "^[0-9]+$";
+    private String description;
+    private int number;
+
+    private EtherTypes(String description, int number) {
+        this.description = description;
+        this.number = number;
+    }
+
+    @Override
+    public String toString() {
+        return description;
+    }
+
+    public int intValue() {
+        return number;
+    }
+
+    public short shortValue() {
+        return ((Integer) number).shortValue();
+    }
+
+    public static String getEtherTypeName(int number) {
+        return getEtherTypeInternal(number);
+    }
+
+    public static String getEtherTypeName(short number) {
+        return getEtherTypeInternal((int) number & 0xffff);
+    }
+
+    public static String getEtherTypeName(byte number) {
+        return getEtherTypeInternal((int) number & 0xff);
+    }
+
+    private static String getEtherTypeInternal(int number) {
+        for (EtherTypes type : EtherTypes.values()) {
+            if (type.number == number) {
+                return type.toString();
+            }
+        }
+        return "0x" + Integer.toHexString(number);
+    }
+
+    public static short getEtherTypeNumberShort(String name) {
+        if (name.matches(regexNumberString)) {
+            return Short.valueOf(name);
+        }
+        for (EtherTypes type : EtherTypes.values()) {
+            if (type.description.equalsIgnoreCase(name)) {
+                return type.shortValue();
+            }
+        }
+        return 0;
+    }
+
+    public static int getEtherTypeNumberInt(String name) {
+        if (name.matches(regexNumberString)) {
+            return Integer.valueOf(name);
+        }
+        for (EtherTypes type : EtherTypes.values()) {
+            if (type.description.equalsIgnoreCase(name)) {
+                return type.intValue();
+            }
+        }
+        return 0;
+    }
+
+    public static List<String> getEtherTypesNameList() {
+        List<String> ethertypesList = new ArrayList<String>();
+        for (EtherTypes type : EtherTypes.values()) {
+            ethertypesList.add(type.toString());
+        }
+        return ethertypesList;
+    }
+
+    public static EtherTypes loadFromString(String string) {
+        int intType = Integer.parseInt(string);
+
+        for (EtherTypes type : EtherTypes.values()) {
+            if (type.number == intType) {
+                return type;
+            }
+        }
+        return null;
+    }
+
+}
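Reviewer note: a short sketch of the EtherTypes lookup helpers (illustrative only, not part of the patch; the literal values are examples):

    short lldp = EtherTypes.LLDP.shortValue();           // 0x88CC, wraps to a negative short
    String name = EtherTypes.getEtherTypeName(0x8100);   // "VLAN Tagged"
    int arp = EtherTypes.getEtherTypeNumberInt("ARP");   // 0x806
    EtherTypes ipv4 = EtherTypes.loadFromString("2048"); // IPv4 (0x800)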
diff --git a/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/Ethernet.java b/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/Ethernet.java
new file mode 100644 (file)
index 0000000..54452bb
--- /dev/null
@@ -0,0 +1,134 @@
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.commons.lang3.tuple.Pair;
+
+/**
+ * Class that represents the Ethernet frame objects
+ */
+public class Ethernet extends Packet {
+    private static final String DMAC = "DestinationMACAddress";
+    private static final String SMAC = "SourceMACAddress";
+    private static final String ETHT = "EtherType";
+
+    // TODO: This map should live outside this class, and it should be possible
+    // for OSGi bundles to register newly added packet classes
+    public static final Map<Short, Class<? extends Packet>> etherTypeClassMap;
+    static {
+        etherTypeClassMap = new HashMap<Short, Class<? extends Packet>>();
+        etherTypeClassMap.put(EtherTypes.LLDP.shortValue(), LLDP.class);
+    }
+    private static Map<String, Pair<Integer, Integer>> fieldCoordinates = new LinkedHashMap<String, Pair<Integer, Integer>>() {
+        private static final long serialVersionUID = 1L;
+        {
+            put(DMAC, new ImmutablePair<Integer, Integer>(0, 48));
+            put(SMAC, new ImmutablePair<Integer, Integer>(48, 48));
+            put(ETHT, new ImmutablePair<Integer, Integer>(96, 16));
+        }
+    };
+    private final Map<String, byte[]> fieldValues;
+
+    /**
+     * Default constructor that creates and sets the HashMap
+     */
+    public Ethernet() {
+        super();
+        fieldValues = new HashMap<String, byte[]>();
+        hdrFieldCoordMap = fieldCoordinates;
+        hdrFieldsMap = fieldValues;
+    }
+
+    /**
+     * Constructor that sets the access level for the packet and
+     * creates and sets the HashMap
+     */
+    public Ethernet(boolean writeAccess) {
+        super(writeAccess);
+        fieldValues = new HashMap<String, byte[]>();
+        hdrFieldCoordMap = fieldCoordinates;
+        hdrFieldsMap = fieldValues;
+    }
+
+    @Override
+    public void setHeaderField(String headerField, byte[] readValue) {
+        if (headerField.equals(ETHT)) {
+            payloadClass = etherTypeClassMap.get(BitBufferHelper
+                    .getShort(readValue));
+        }
+        hdrFieldsMap.put(headerField, readValue);
+    }
+
+    /**
+     * Gets the destination MAC address stored
+     * @return byte[] - the destinationMACAddress
+     */
+    public byte[] getDestinationMACAddress() {
+        return fieldValues.get(DMAC);
+    }
+
+    /**
+     * Gets the source MAC address stored
+     * @return byte[] - the sourceMACAddress
+     */
+    public byte[] getSourceMACAddress() {
+        return fieldValues.get(SMAC);
+    }
+
+    /**
+     * Gets the etherType stored
+     * @return short - the etherType
+     */
+    public short getEtherType() {
+        return BitBufferHelper.getShort(fieldValues.get(ETHT));
+    }
+
+    public boolean isBroadcast(){
+        return NetUtils.isBroadcastMACAddr(getDestinationMACAddress());
+    }
+
+    public boolean isMulticast(){
+        return NetUtils.isMulticastMACAddr(getDestinationMACAddress());
+    }
+
+    /**
+     * Sets the destination MAC address for the current Ethernet object instance
+     * @param destinationMACAddress the destination MAC address to set
+     * @return this Ethernet instance
+     */
+    public Ethernet setDestinationMACAddress(byte[] destinationMACAddress) {
+        fieldValues.put(DMAC, destinationMACAddress);
+        return this;
+    }
+
+    /**
+     * Sets the source MAC address for the current Ethernet object instance
+     * @param sourceMACAddress the source MAC address to set
+     * @return this Ethernet instance
+     */
+    public Ethernet setSourceMACAddress(byte[] sourceMACAddress) {
+        fieldValues.put(SMAC, sourceMACAddress);
+        return this;
+    }
+
+    /**
+     * Sets the etherType for the current Ethernet object instance
+     * @param etherType the etherType to set
+     * @return this Ethernet instance
+     */
+    public Ethernet setEtherType(short etherType) {
+        byte[] ethType = BitBufferHelper.toByteArray(etherType);
+        fieldValues.put(ETHT, ethType);
+        return this;
+    }
+
+}
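Reviewer note: a minimal sketch of how the Ethernet header class is meant to be populated for an LLDP frame (illustrative only, not part of the patch; the source MAC is an arbitrary example value):

    Ethernet eth = new Ethernet();
    eth.setSourceMACAddress(new byte[] { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 })
       .setDestinationMACAddress(LLDP.LLDPMulticastMac)
       .setEtherType(EtherTypes.LLDP.shortValue());

    boolean mcast = eth.isMulticast();    // true for the LLDP multicast destination
    short etherType = eth.getEtherType(); // 0x88CC as a signed short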
diff --git a/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/EthernetAddress.java b/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/EthernetAddress.java
new file mode 100644 (file)
index 0000000..b7b72cb
--- /dev/null
@@ -0,0 +1,124 @@
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.util.Arrays;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import javax.xml.bind.annotation.XmlTransient;
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+public class EthernetAddress extends DataLinkAddress {
+    private static final long serialVersionUID = 1L;
+    @XmlTransient
+    private byte[] macAddress;
+
+    public static final EthernetAddress BROADCASTMAC = createWellKnownAddress(new byte[] {
+            (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff,
+            (byte) 0xff });
+
+    public static final EthernetAddress INVALIDHOST = BROADCASTMAC;
+
+    public static final String addressName = "Ethernet MAC Address";
+    public static final int SIZE = 6;
+
+    private static final EthernetAddress createWellKnownAddress(byte[] mac) {
+        try {
+            return new EthernetAddress(mac);
+        } catch (ConstructionException ce) {
+            return null;
+        }
+    }
+
+    /* Private constructor to satisfy JAXB */
+    @SuppressWarnings("unused")
+    private EthernetAddress() {
+    }
+
+    /**
+     * Public constructor for an Ethernet MAC address built from the bytes
+     * constituting the address; the constructor validates the size of the
+     * input array to make sure it meets the expected size
+     *
+     * @param macAddress A byte array in big endian format
+     * representing the Ethernet MAC Address
+     *
+     * @throws ConstructionException if the passed array is null or not of the expected size
+     */
+    public EthernetAddress(byte[] macAddress) throws ConstructionException {
+        super(addressName);
+
+        if (macAddress == null) {
+            throw new ConstructionException("Null input parameter passed");
+        }
+
+        if (macAddress.length != SIZE) {
+            throw new ConstructionException(
+                    "Wrong size of passed byte array, expected:" + SIZE
+                            + " got:" + macAddress.length);
+        }
+        this.macAddress = new byte[SIZE];
+        System.arraycopy(macAddress, 0, this.macAddress, 0, SIZE);
+    }
+
+    @Override
+    public EthernetAddress clone() {
+        try {
+            return new EthernetAddress(this.macAddress.clone());
+        } catch (ConstructionException ce) {
+            return null;
+        }
+    }
+
+    /**
+     * Returns the Ethernet MAC address in byte array format
+     *
+     * @return The Ethernet MAC address in byte array format
+     */
+    public byte[] getValue() {
+        return this.macAddress;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = super.hashCode();
+        result = prime * result + Arrays.hashCode(macAddress);
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (!super.equals(obj))
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        EthernetAddress other = (EthernetAddress) obj;
+        if (!Arrays.equals(macAddress, other.macAddress))
+            return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return "EthernetAddress [macAddress=" + HexEncode.bytesToHexStringFormat(macAddress)
+                + "]";
+    }
+
+    @XmlElement(name = "macAddress")
+    public String getMacAddress() {
+        return HexEncode.bytesToHexStringFormat(macAddress);
+    }
+}
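Reviewer note: a small usage sketch for EthernetAddress (illustrative only, not part of the patch; the MAC value is an example):

    try {
        EthernetAddress addr = new EthernetAddress(
                new byte[] { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 });
        String formatted = addr.getMacAddress();                     // "00:11:22:33:44:55"
        boolean isBcast = addr.equals(EthernetAddress.BROADCASTMAC); // false
    } catch (ConstructionException e) {
        // thrown when the input array is null or not exactly SIZE (6) bytes long
    }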
diff --git a/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/HexEncode.java b/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/HexEncode.java
new file mode 100644 (file)
index 0000000..8236d4c
--- /dev/null
@@ -0,0 +1,114 @@
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.math.BigInteger;
+
+/**
+ * The class provides methods to convert byte arrays to and from hex-encoded strings
+ */
+public class HexEncode {
+    /**
+     * This method converts byte array into String format without ":" inserted.
+     *
+     * @param bytes
+     *            The byte array to convert to string
+     * @return The hexadecimal representation of the byte array. If bytes is
+     *         null, "null" string is returned
+     */
+    public static String bytesToHexString(byte[] bytes) {
+
+        if (bytes == null) {
+            return "null";
+        }
+
+        StringBuffer buf = new StringBuffer();
+        for (int i = 0; i < bytes.length; i++) {
+            short u8byte = (short) (bytes[i] & 0xff);
+            String tmp = Integer.toHexString(u8byte);
+            if (tmp.length() == 1) {
+                buf.append("0");
+            }
+            buf.append(tmp);
+        }
+        return buf.toString();
+    }
+
+    public static String longToHexString(long val) {
+        char arr[] = Long.toHexString(val).toCharArray();
+        StringBuffer buf = new StringBuffer();
+        // prepend the right number of leading zeros
+        int i = 0;
+        for (; i < (16 - arr.length); i++) {
+            buf.append("0");
+            if ((i & 0x01) == 1) {
+                buf.append(":");
+            }
+        }
+        for (int j = 0; j < arr.length; j++) {
+            buf.append(arr[j]);
+            if ((((i + j) & 0x01) == 1) && (j < (arr.length - 1))) {
+                buf.append(":");
+            }
+        }
+        return buf.toString();
+    }
+
+
+    public static byte[] bytesFromHexString(String values) {
+        String target = "";
+        if (values != null) {
+            target = values;
+        }
+        String[] octets = target.split(":");
+
+        byte[] ret = new byte[octets.length];
+        for (int i = 0; i < octets.length; i++) {
+            ret[i] = Integer.valueOf(octets[i], 16).byteValue();
+        }
+        return ret;
+    }
+
+    public static long stringToLong(String values) {
+        long value = new BigInteger(values.replaceAll(":", ""), 16).longValue();
+        return value;
+    }
+
+    /**
+     * This method converts byte array into HexString format with ":" inserted.
+     */
+    public static String bytesToHexStringFormat(byte[] bytes) {
+        if (bytes == null) {
+            return "null";
+        }
+        StringBuffer buf = new StringBuffer();
+        for (int i = 0; i < bytes.length; i++) {
+            if (i > 0) {
+                buf.append(":");
+            }
+            short u8byte = (short) (bytes[i] & 0xff);
+            String tmp = Integer.toHexString(u8byte);
+            if (tmp.length() == 1) {
+                buf.append("0");
+            }
+            buf.append(tmp);
+        }
+        return buf.toString();
+    }
+}
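Reviewer note: the HexEncode helpers in a quick round trip (illustrative only, not part of the patch):

    byte[] mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
    String plain = HexEncode.bytesToHexString(mac);        // "001122334455"
    String pretty = HexEncode.bytesToHexStringFormat(mac); // "00:11:22:33:44:55"
    byte[] back = HexEncode.bytesFromHexString(pretty);    // { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }
    long asLong = HexEncode.stringToLong(pretty);          // 0x001122334455L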
diff --git a/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/LLDP.java b/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/LLDP.java
new file mode 100644 (file)
index 0000000..9b7efbb
--- /dev/null
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Class that represents the LLDP frame objects
+ */
+
+public class LLDP extends Packet {
+    private static final String CHASSISID = "ChassisId";
+    private static final String SYSTEMNAMEID = "SystemNameID";
+    private static final String PORTID = "PortId";
+    private static final String TTL = "TTL";
+    private static final int LLDPDefaultTlvs = 4;
+    private static LLDPTLV emptyTLV = new LLDPTLV().setLength((short) 0)
+            .setType((byte) 0);
+    public static final byte[] LLDPMulticastMac = { 1, (byte) 0x80,
+            (byte) 0xc2, 0, 0, (byte) 0xe };
+    private Map<Byte, LLDPTLV> tlvList;
+
+    /**
+     * Default constructor that creates the tlvList LinkedHashMap
+     */
+    public LLDP() {
+        super();
+        tlvList = new LinkedHashMap<Byte, LLDPTLV>(LLDPDefaultTlvs);
+    }
+
+    /**
+     * Constructor that creates the tlvList LinkedHashMap and sets the write
+     * access for the same
+     */
+    public LLDP(boolean writeAccess) {
+        super(writeAccess);
+        tlvList = new LinkedHashMap<Byte, LLDPTLV>(LLDPDefaultTlvs); // Mandatory
+                                                                     // TLVs
+    }
+
+    /**
+     * @param typeDesc
+     *            description of the type of TLV
+     * @return byte - type of TLV
+     */
+    private byte getType(String typeDesc) {
+        if (typeDesc.equals(CHASSISID)) {
+            return LLDPTLV.TLVType.ChassisID.getValue();
+        } else if (typeDesc.equals(PORTID)) {
+            return LLDPTLV.TLVType.PortID.getValue();
+        } else if (typeDesc.equals(TTL)) {
+            return LLDPTLV.TLVType.TTL.getValue();
+        } else {
+            return LLDPTLV.TLVType.Unknown.getValue();
+        }
+    }
+
+    /**
+     * @param type
+     *            description of the type of TLV
+     * @return LLDPTLV - full TLV
+     */
+    public LLDPTLV getTLV(String type) {
+        return tlvList.get(getType(type));
+    }
+
+    /**
+     * @param type
+     *            description of the type of TLV
+     * @param tlv
+     *            the TLV to set
+     */
+    public void setTLV(String type, LLDPTLV tlv) {
+        tlvList.put(getType(type), tlv);
+    }
+
+    /**
+     * @return the chassisId TLV
+     */
+    public LLDPTLV getChassisId() {
+        return getTLV(CHASSISID);
+    }
+
+    /**
+     * @param chassisId
+     *            the chassisId to set
+     * @return LLDP
+     */
+    public LLDP setChassisId(LLDPTLV chassisId) {
+        tlvList.put(getType(CHASSISID), chassisId);
+        return this;
+    }
+
+    /**
+     * @return the SystemName TLV
+     */
+    public LLDPTLV getSystemNameId() {
+        return getTLV(SYSTEMNAMEID);
+    }
+
+    /**
+     * @param systemNameId
+     *            the systemNameId to set
+     * @return LLDP
+     */
+    public LLDP setSystemNameId(LLDPTLV systemNameId) {
+        tlvList.put(getType(SYSTEMNAMEID), systemNameId);
+        return this;
+    }
+
+    /**
+     * @return LLDPTLV - the portId TLV
+     */
+    public LLDPTLV getPortId() {
+        return tlvList.get(getType(PORTID));
+    }
+
+    /**
+     * @param portId
+     *            the portId to set
+     * @return LLDP
+     */
+    public LLDP setPortId(LLDPTLV portId) {
+        tlvList.put(getType(PORTID), portId);
+        return this;
+    }
+
+    /**
+     * @return LLDPTLV - the ttl TLV
+     */
+    public LLDPTLV getTtl() {
+        return tlvList.get(getType(TTL));
+    }
+
+    /**
+     * @param ttl
+     *            the ttl to set
+     * @return LLDP
+     */
+    public LLDP setTtl(LLDPTLV ttl) {
+        tlvList.put(getType(TTL), ttl);
+        return this;
+    }
+
+    /**
+     * @return the optionalTLVList
+     */
+    public List<LLDPTLV> getOptionalTLVList() {
+        List<LLDPTLV> list = new ArrayList<LLDPTLV>();
+        for (Map.Entry<Byte, LLDPTLV> entry : tlvList.entrySet()) {
+            byte type = entry.getKey();
+            if ((type == LLDPTLV.TLVType.ChassisID.getValue())
+                    || (type == LLDPTLV.TLVType.PortID.getValue())
+                    || (type == LLDPTLV.TLVType.TTL.getValue())) {
+                continue;
+            } else {
+                list.add(entry.getValue());
+            }
+        }
+        return list;
+    }
+
+    /**
+     * @param optionalTLVList
+     *            the optionalTLVList to set
+     * @return LLDP
+     */
+    public LLDP setOptionalTLVList(List<LLDPTLV> optionalTLVList) {
+        for (LLDPTLV tlv : optionalTLVList) {
+            tlvList.put(tlv.getType(), tlv);
+        }
+        return this;
+    }
+
+    @Override
+    public Packet deserialize(byte[] data, int bitOffset, int size)
+            throws PacketException {
+        int lldpOffset = bitOffset; // LLDP start
+        int lldpSize = size; // LLDP size
+
+        if (logger.isTraceEnabled()) {
+          logger.trace("LLDP: {} (offset {} bitsize {})", new Object[] {
+                  HexEncode.bytesToHexString(data), lldpOffset, lldpSize });
+        }
+        /*
+         * Deserialize the TLVs until we reach the end of the packet
+         */
+        while (lldpSize > 0) {
+            LLDPTLV tlv = new LLDPTLV();
+            tlv.deserialize(data, lldpOffset, lldpSize);
+            if (tlv.getType() == 0 && tlv.getLength() == 0) {
+               break;
+            }
+            int tlvSize = tlv.getTLVSize(); // Size of current TLV in bits
+            lldpOffset += tlvSize;
+            lldpSize -= tlvSize;
+            this.tlvList.put(tlv.getType(), tlv);
+        }
+        return this;
+    }
+
+    @Override
+    public byte[] serialize() throws PacketException {
+        int startOffset = 0;
+        byte[] serializedBytes = new byte[getLLDPPacketLength()];
+
+        for (Map.Entry<Byte, LLDPTLV> entry : tlvList.entrySet()) {
+            LLDPTLV tlv = entry.getValue();
+            int numBits = tlv.getTLVSize();
+            try {
+                BitBufferHelper.setBytes(serializedBytes, tlv.serialize(),
+                        startOffset, numBits);
+            } catch (BufferException e) {
+                throw new PacketException(e.getMessage());
+            }
+            startOffset += numBits;
+        }
+        // Now add the empty LLDPTLV at the end
+        try {
+            BitBufferHelper.setBytes(serializedBytes,
+                    LLDP.emptyTLV.serialize(), startOffset,
+                    LLDP.emptyTLV.getTLVSize());
+        } catch (BufferException e) {
+            throw new PacketException(e.getMessage());
+        }
+
+        if (logger.isTraceEnabled()) {
+          logger.trace("LLDP: serialized: {}",
+                  HexEncode.bytesToHexString(serializedBytes));
+        }
+        return serializedBytes;
+    }
+
+    /**
+     * Returns the size of LLDP packet in bytes
+     *
+     * @return int - LLDP Packet size in bytes
+     */
+    private int getLLDPPacketLength() {
+        int len = 0;
+        LLDPTLV tlv;
+
+        for (Map.Entry<Byte, LLDPTLV> entry : this.tlvList.entrySet()) {
+            tlv = entry.getValue();
+            len += tlv.getTLVSize();
+        }
+        len += LLDP.emptyTLV.getTLVSize();
+
+        return len / NetUtils.NumBitsInAByte;
+    }
+}
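Reviewer note: putting LLDP and LLDPTLV together, this is roughly how a frame with the three mandatory TLVs would be assembled and serialized (illustrative only, not part of the patch; the chassis/port identifiers and the two-byte TTL value of 120 seconds are example values):

    LLDPTLV chassisIdTlv = new LLDPTLV();
    byte[] cid = LLDPTLV.createChassisIDTLVValue("00:11:22:33:44:55");
    chassisIdTlv.setType(LLDPTLV.TLVType.ChassisID.getValue())
                .setLength((short) cid.length).setValue(cid);

    LLDPTLV portIdTlv = new LLDPTLV();
    byte[] pid = LLDPTLV.createPortIDTLVValue("port-1");
    portIdTlv.setType(LLDPTLV.TLVType.PortID.getValue())
             .setLength((short) pid.length).setValue(pid);

    LLDPTLV ttlTlv = new LLDPTLV();
    byte[] ttl = new byte[] { 0, 120 };
    ttlTlv.setType(LLDPTLV.TLVType.TTL.getValue())
          .setLength((short) ttl.length).setValue(ttl);

    LLDP lldp = new LLDP().setChassisId(chassisIdTlv).setPortId(portIdTlv).setTtl(ttlTlv);
    try {
        byte[] frame = lldp.serialize(); // TLVs in insertion order, terminated by the empty TLV
    } catch (PacketException e) {
        // serialization failed
    }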
diff --git a/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/LLDPTLV.java b/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/LLDPTLV.java
new file mode 100644 (file)
index 0000000..22bd462
--- /dev/null
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.Charset;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.commons.lang3.tuple.MutablePair;
+import org.apache.commons.lang3.tuple.Pair;
+
+/**
+ * Class that represents the LLDPTLV objects
+ */
+
+public class LLDPTLV extends Packet {
+    private static final String TYPE = "Type";
+    private static final String LENGTH = "Length";
+    private static final String VALUE = "Value";
+    private static final int LLDPTLVFields = 3;
+    public static final byte[] OFOUI = new byte[] { (byte) 0x00, (byte) 0x26,
+        (byte) 0xe1 }; // OpenFlow OUI
+    public static final byte[] customTlvSubType = new byte[] { 0 };
+    public static final int customTlvOffset = OFOUI.length
+            + customTlvSubType.length;
+    public static final byte chassisIDSubType[] = new byte[] { 4 }; // MAC address for the system
+    public static final byte portIDSubType[] = new byte[] { 7 }; // locally assigned
+
+    public enum TLVType {
+        Unknown((byte) 0), ChassisID((byte) 1), PortID((byte) 2), TTL((byte) 3), PortDesc(
+                (byte) 4), SystemName((byte) 5), SystemDesc((byte) 6), Custom(
+                        (byte) 127);
+
+        private byte value;
+
+        private TLVType(byte value) {
+            this.value = value;
+        }
+
+        public byte getValue() {
+            return value;
+        }
+    }
+
+    private static Map<String, Pair<Integer, Integer>> fieldCoordinates = new LinkedHashMap<String, Pair<Integer, Integer>>() {
+        private static final long serialVersionUID = 1L;
+
+        {
+            put(TYPE, new MutablePair<Integer, Integer>(0, 7));
+            put(LENGTH, new MutablePair<Integer, Integer>(7, 9));
+            put(VALUE, new MutablePair<Integer, Integer>(16, 0));
+        }
+    };
+
+    protected Map<String, byte[]> fieldValues;
+
+    /**
+     * Default constructor that creates and sets the hash map values and sets
+     * the payload to null
+     */
+    public LLDPTLV() {
+        payload = null;
+        fieldValues = new HashMap<String, byte[]>(LLDPTLVFields);
+        hdrFieldCoordMap = fieldCoordinates;
+        hdrFieldsMap = fieldValues;
+    }
+
+    /**
+     * Constructor that writes the passed LLDPTLV values to the hdrFieldsMap
+     */
+    public LLDPTLV(LLDPTLV other) {
+        for (Map.Entry<String, byte[]> entry : other.hdrFieldsMap.entrySet()) {
+            this.hdrFieldsMap.put(entry.getKey(), entry.getValue());
+        }
+    }
+
+    /**
+     * @return int - the length of TLV
+     */
+    public int getLength() {
+        return (int) BitBufferHelper.toNumber(fieldValues.get(LENGTH),
+                fieldCoordinates.get(LENGTH).getRight().intValue());
+    }
+
+    /**
+     * @return byte - the type of TLV
+     */
+    public byte getType() {
+        return BitBufferHelper.getByte(fieldValues.get(TYPE));
+    }
+
+    /**
+     * @return byte[] - the value field of TLV
+     */
+    public byte[] getValue() {
+        return fieldValues.get(VALUE);
+    }
+
+    /**
+     * @param type the type to set
+     * @return LLDPTLV
+     */
+    public LLDPTLV setType(byte type) {
+        byte[] lldpTLVtype = { type };
+        fieldValues.put(TYPE, lldpTLVtype);
+        return this;
+    }
+
+    /**
+     * @param length the length to set
+     * @return LLDPTLV
+     */
+    public LLDPTLV setLength(short length) {
+        fieldValues.put(LENGTH, BitBufferHelper.toByteArray(length));
+        return this;
+    }
+
+    /**
+     * @param value the value to set
+     * @return LLDPTLV
+     */
+    public LLDPTLV setValue(byte[] value) {
+        fieldValues.put(VALUE, value);
+        return this;
+    }
+
+    @Override
+    public void setHeaderField(String headerField, byte[] readValue) {
+        hdrFieldsMap.put(headerField, readValue);
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = super.hashCode();
+        result = prime * result
+                + ((fieldValues == null) ? 0 : fieldValues.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (!super.equals(obj)) {
+            return false;
+        }
+        if (getClass() != obj.getClass()) {
+            return false;
+        }
+        LLDPTLV other = (LLDPTLV) obj;
+        if (fieldValues == null) {
+            if (other.fieldValues != null) {
+                return false;
+            }
+        } else if (!fieldValues.equals(other.fieldValues)) {
+            return false;
+        }
+        return true;
+    }
+
+    @Override
+    public int getfieldnumBits(String fieldName) {
+        if (fieldName.equals(VALUE)) {
+            return (NetUtils.NumBitsInAByte * BitBufferHelper.getShort(
+                    fieldValues.get(LENGTH), fieldCoordinates.get(LENGTH)
+                    .getRight().intValue()));
+        }
+        return fieldCoordinates.get(fieldName).getRight();
+    }
+
+    /**
+     * Returns the size in bits of the whole TLV
+     *
+     * @return int - size in bits of full TLV
+     */
+    public int getTLVSize() {
+        return (LLDPTLV.fieldCoordinates.get(TYPE).getRight() + // static
+                LLDPTLV.fieldCoordinates.get(LENGTH).getRight() + // static
+                getfieldnumBits(VALUE)); // variable
+    }
+
+    /**
+     * Creates the SystemName TLV value
+     *
+     * @param nodeId
+     *            node identifier string
+     * @return the SystemName TLV value in byte array
+     */
+    static public byte[] createSystemNameTLVValue(String nodeId) {
+        byte[] nid = nodeId.getBytes();
+        return nid;
+    }
+
+    /**
+     * Creates the ChassisID TLV value including the subtype and ChassisID
+     * string
+     *
+     * @param nodeId
+     *            node identifier string
+     * @return the ChassisID TLV value in byte array
+     */
+    static public byte[] createChassisIDTLVValue(String nodeId) {
+        byte[] nid = HexEncode.bytesFromHexString(nodeId);
+        byte[] cid = new byte[6];
+        int srcPos = 0, dstPos = 0;
+
+        if (nid.length > cid.length) {
+            srcPos = nid.length - cid.length;
+        } else {
+            dstPos = cid.length - nid.length;
+        }
+        System.arraycopy(nid, srcPos, cid, dstPos, cid.length);
+
+        byte[] cidValue = new byte[cid.length + chassisIDSubType.length];
+
+        System.arraycopy(chassisIDSubType, 0, cidValue, 0,
+                chassisIDSubType.length);
+        System.arraycopy(cid, 0, cidValue, chassisIDSubType.length, cid.length);
+
+        return cidValue;
+    }
+
+    /**
+     * Creates the PortID TLV value including the subtype and PortID string
+     *
+     * @param portId
+     *            port identifier string
+     * @return the PortID TLV value in byte array
+     */
+    static public byte[] createPortIDTLVValue(String portId) {
+        byte[] pid = portId.getBytes(Charset.defaultCharset());
+        byte[] pidValue = new byte[pid.length + portIDSubType.length];
+
+        System.arraycopy(portIDSubType, 0, pidValue, 0, portIDSubType.length);
+        System.arraycopy(pid, 0, pidValue, portIDSubType.length, pid.length);
+
+        return pidValue;
+    }
+
+    /**
+     * Creates the custom TLV value including OUI, subtype and custom string
+     *
+     * @param customString
+     *            the custom string to embed after the OUI and subtype
+     * @return the custom TLV value in byte array
+     */
+    static public byte[] createCustomTLVValue(String customString) {
+        byte[] customArray = customString.getBytes(Charset.defaultCharset());
+        byte[] customValue = new byte[customTlvOffset + customArray.length];
+
+        System.arraycopy(OFOUI, 0, customValue, 0, OFOUI.length);
+        System.arraycopy(customTlvSubType, 0, customValue, OFOUI.length,
+                customTlvSubType.length);
+        System.arraycopy(customArray, 0, customValue, customTlvOffset,
+                customArray.length);
+
+        return customValue;
+    }
+
+    /**
+     * Retrieves the string from TLV value and returns it in HexString format
+     *
+     * @param tlvValue
+     *            the TLV value
+     * @param tlvLen
+     *            the TLV length
+     * @return the HexString
+     */
+    static public String getHexStringValue(byte[] tlvValue, int tlvLen) {
+        byte[] cidBytes = new byte[tlvLen - chassisIDSubType.length];
+        System.arraycopy(tlvValue, chassisIDSubType.length, cidBytes, 0,
+                cidBytes.length);
+        return HexEncode.bytesToHexStringFormat(cidBytes);
+    }
+
+    /**
+     * Retrieves the string from TLV value
+     *
+     * @param tlvValue
+     *            the TLV value
+     * @param tlvLen
+     *            the TLV length
+     * @return the string
+     */
+    static public String getStringValue(byte[] tlvValue, int tlvLen) {
+        byte[] pidSubType = new byte[portIDSubType.length];
+        byte[] pidBytes = new byte[tlvLen - portIDSubType.length];
+        System.arraycopy(tlvValue, 0, pidSubType, 0,
+                pidSubType.length);
+        System.arraycopy(tlvValue, portIDSubType.length, pidBytes, 0,
+                pidBytes.length);
+        if (pidSubType[0] == (byte) 0x3) {
+            return HexEncode.bytesToHexStringFormat(pidBytes);
+        } else {
+            return (new String(pidBytes, Charset.defaultCharset()));
+        }
+    }
+
+    /**
+     * Retrieves the custom string from the Custom TLV value which includes OUI,
+     * subtype and custom string
+     *
+     * @param customTlvValue
+     *            the custom TLV value
+     * @param customTlvLen
+     *            the custom TLV length
+     * @return the custom string
+     */
+    static public String getCustomString(byte[] customTlvValue, int customTlvLen) {
+        String customString = "";
+        byte[] vendor = new byte[3];
+        System.arraycopy(customTlvValue, 0, vendor, 0, vendor.length);
+        if (Arrays.equals(vendor, LLDPTLV.OFOUI)) {
+            int customArrayLength = customTlvLen - customTlvOffset;
+            byte[] customArray = new byte[customArrayLength];
+            System.arraycopy(customTlvValue, customTlvOffset, customArray, 0,
+                    customArrayLength);
+            try {
+                customString = new String(customArray, "UTF-8");
+            } catch (UnsupportedEncodingException e) {
+                // UTF-8 is always supported by the JVM, so this can never happen;
+                // fall through and return the empty string
+            }
+        }
+
+        return customString;
+    }
+}
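Reviewer note: a sketch of the custom (vendor-specific) TLV helpers, which embed the OpenFlow OUI and subtype in front of an arbitrary string (illustrative only, not part of the patch; the string is an example):

    byte[] customValue = LLDPTLV.createCustomTLVValue("node-connector:1");
    LLDPTLV customTlv = new LLDPTLV()
            .setType(LLDPTLV.TLVType.Custom.getValue())
            .setLength((short) customValue.length)
            .setValue(customValue);

    // On the receive side the embedded string can be recovered from the raw value:
    String recovered = LLDPTLV.getCustomString(customValue, customValue.length); // "node-connector:1"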
diff --git a/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/NetUtils.java b/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/NetUtils.java
new file mode 100644 (file)
index 0000000..0320cf6
--- /dev/null
@@ -0,0 +1,521 @@
+/*
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.net.Inet4Address;
+import java.net.Inet6Address;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility class containing the common utility functions needed for operating on
+ * networking data structures
+ */
+public abstract class NetUtils {
+    protected static final Logger logger = LoggerFactory.getLogger(NetUtils.class);
+    /**
+     * Constant holding the number of bits in a byte
+     */
+    public static final int NumBitsInAByte = 8;
+
+    /**
+     * Constant holding the number of bytes in MAC Address
+     */
+    public static final int MACAddrLengthInBytes = 6;
+
+    /**
+     * Constant holding the number of words in MAC Address
+     */
+    public static final int MACAddrLengthInWords = 3;
+
+    /**
+     * Constant holding the broadcast MAC address
+     */
+    private static final byte[] BroadcastMACAddr = {-1, -1, -1, -1, -1, -1};
+
+    /**
+     * Converts a 4 bytes array into an integer number
+     *
+     * @param ba
+     *            the 4 bytes long byte array
+     * @return the integer number
+     */
+    public static int byteArray4ToInt(byte[] ba) {
+        if (ba == null || ba.length != 4) {
+            return 0;
+        }
+        return (0xff & ba[0]) << 24 | (0xff & ba[1]) << 16 | (0xff & ba[2]) << 8 | (0xff & ba[3]);
+    }
+
+    /**
+     * Converts a 6-byte array into a long number, for use with MAC addresses.
+     *
+     * @param ba
+     *            The 6 bytes long byte array.
+     * @return The long number.
+     *         Zero is returned if {@code ba} is {@code null} or
+     *         the length of it is not six.
+     */
+    public static long byteArray6ToLong(byte[] ba) {
+        if (ba == null || ba.length != MACAddrLengthInBytes) {
+            return 0L;
+        }
+        long num = 0L;
+        int i = 0;
+        do {
+            num <<= NumBitsInAByte;
+            num |= 0xff & ba[i];
+            i++;
+        } while (i < MACAddrLengthInBytes);
+        return num;
+    }
+
+    /**
+     * Converts a long number to a 6 bytes array for MAC addresses.
+     *
+     * @param addr
+     *            The long number.
+     * @return The byte array.
+     */
+    public static byte[] longToByteArray6(long addr){
+        byte[] mac = new byte[MACAddrLengthInBytes];
+        int i = MACAddrLengthInBytes - 1;
+        do {
+            mac[i] = (byte) addr;
+            addr >>>= NumBitsInAByte;
+            i--;
+        } while (i >= 0);
+        return mac;
+    }
+
+    /**
+     * Converts an integer number into a 4 bytes array
+     *
+     * @param i
+     *            the integer number
+     * @return the byte array
+     */
+    public static byte[] intToByteArray4(int i) {
+        return new byte[] { (byte) ((i >> 24) & 0xff), (byte) ((i >> 16) & 0xff), (byte) ((i >> 8) & 0xff),
+                (byte) (i & 0xff) };
+    }
+
+    /**
+     * Converts an IP address passed as integer value into the respective
+     * InetAddress object
+     *
+     * @param address
+     *            the IP address in integer form
+     * @return the IP address in InetAddress form
+     */
+    public static InetAddress getInetAddress(int address) {
+        InetAddress ip = null;
+        try {
+            ip = InetAddress.getByAddress(NetUtils.intToByteArray4(address));
+        } catch (UnknownHostException e) {
+            logger.error("", e);
+        }
+        return ip;
+    }
+
+    /**
+     * Return the InetAddress Network Mask given the length of the prefix bit
+     * mask. The prefix bit mask indicates the contiguous leading bits that are
+     * NOT masked out. Example: A prefix bit mask length of 8 will give an
+     * InetAddress Network Mask of 255.0.0.0
+     *
+     * @param prefixMaskLength
+     *            integer representing the length of the prefix network mask
+     * @param isV6
+     *            boolean representing the IP version of the returned address
+     * @return the InetAddress network mask, or null if prefixMaskLength is out of range
+     */
+    public static InetAddress getInetNetworkMask(int prefixMaskLength, boolean isV6) {
+        if (prefixMaskLength < 0 || (!isV6 && prefixMaskLength > 32) || (isV6 && prefixMaskLength > 128)) {
+            return null;
+        }
+        byte v4Address[] = { 0, 0, 0, 0 };
+        byte v6Address[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+        byte address[] = (isV6) ? v6Address : v4Address;
+        int numBytes = prefixMaskLength / 8;
+        int numBits = prefixMaskLength % 8;
+        int i = 0;
+        for (; i < numBytes; i++) {
+            address[i] = (byte) 0xff;
+        }
+        if (numBits > 0) {
+            int rem = 0;
+            for (int j = 0; j < numBits; j++) {
+                rem |= 1 << (7 - j);
+            }
+            address[i] = (byte) rem;
+        }
+
+        try {
+            return InetAddress.getByAddress(address);
+        } catch (UnknownHostException e) {
+            logger.error("", e);
+        }
+        return null;
+    }
+
+    /**
+     * Returns the prefix size in bits of the specified subnet mask. Example:
+     * For the subnet mask ff.ff.ff.e0 it returns 27 while for ff.00.00.00 it
+     * returns 8. If the passed subnetMask array is null, 0 is returned.
+     *
+     * @param subnetMask
+     *            the subnet mask as byte array
+     * @return the prefix length as number of bits
+     */
+    public static int getSubnetMaskLength(byte[] subnetMask) {
+        int maskLength = 0;
+        if (subnetMask != null && (subnetMask.length == 4 || subnetMask.length == 16)) {
+            int index = 0;
+            while (index < subnetMask.length && subnetMask[index] == (byte) 0xFF) {
+                maskLength += NetUtils.NumBitsInAByte;
+                index++;
+            }
+            if (index != subnetMask.length) {
+                int bits = NetUtils.NumBitsInAByte - 1;
+                while (bits >= 0 && (subnetMask[index] & 1 << bits)  != 0) {
+                    bits--;
+                    maskLength++;
+                }
+            }
+        }
+        return maskLength;
+    }
+
+    /**
+     * Returns the prefix size in bits of the specified subnet mask. Example:
+     * For the subnet mask 255.255.255.128 it returns 25 while for 255.0.0.0 it
+     * returns 8. If the passed subnetMask object is null, 0 is returned
+     *
+     * @param subnetMask
+     *            the subnet mask as InetAddress
+     * @return the prefix length as number of bits
+     */
+    public static int getSubnetMaskLength(InetAddress subnetMask) {
+        return subnetMask == null ? 0 : NetUtils.getSubnetMaskLength(subnetMask.getAddress());
+    }
+
+    /**
+     * Given an IP address and a prefix network mask length, it returns the
+     * equivalent subnet prefix IP address Example: for ip = "172.28.30.254" and
+     * maskLen = 25 it will return "172.28.30.128"
+     *
+     * @param ip
+     *            the IP address in InetAddress form
+     * @param maskLen
+     *            the length of the prefix network mask
+     * @return the subnet prefix IP address in InetAddress form
+     */
+    public static InetAddress getSubnetPrefix(InetAddress ip, int maskLen) {
+        int bytes = maskLen / 8;
+        int bits = maskLen % 8;
+        byte modifiedByte;
+        byte[] sn = ip.getAddress();
+        if (bits > 0) {
+            modifiedByte = (byte) (sn[bytes] >> (8 - bits));
+            sn[bytes] = (byte) (modifiedByte << (8 - bits));
+            bytes++;
+        }
+        for (; bytes < sn.length; bytes++) {
+            sn[bytes] = (byte) (0);
+        }
+        try {
+            return InetAddress.getByAddress(sn);
+        } catch (UnknownHostException e) {
+            return null;
+        }
+    }
+
+    /**
+     * Checks if the test address and mask conflicts with the filter address and
+     * mask
+     *
+     * For example:
+     * testAddress: 172.28.2.23
+     * testMask: 255.255.255.0
+     * filterAddress: 172.28.1.10
+     * filterMask: 255.255.255.0
+     * do conflict
+     *
+     * testAddress: 172.28.2.23
+     * testMask: 255.255.255.0
+     * filterAddress: 172.28.1.10
+     * filterMask: 255.255.0.0
+     * do not conflict
+     *
+     * Null parameters are permitted
+     *
+     * @param testAddress
+     * @param filterAddress
+     * @param testMask
+     * @param filterMask
+     * @return true if the test address/mask pair conflicts with the filter
+     *         address/mask pair, false otherwise
+     */
+    public static boolean inetAddressConflict(InetAddress testAddress, InetAddress filterAddress, InetAddress testMask,
+            InetAddress filterMask) {
+        // Sanity check
+        if ((testAddress == null) || (filterAddress == null)) {
+            return false;
+        }
+
+        // Presence check
+        if (isAny(testAddress) || isAny(filterAddress)) {
+            return false;
+        }
+
+        int testMaskLen = (testMask == null) ? ((testAddress instanceof Inet4Address) ? 32 : 128) : NetUtils
+                .getSubnetMaskLength(testMask);
+        int filterMaskLen = (filterMask == null) ? ((testAddress instanceof Inet4Address) ? 32 : 128) : NetUtils
+                .getSubnetMaskLength(filterMask);
+
+        // Mask length check. Test mask has to be more specific than filter one
+        if (testMaskLen < filterMaskLen) {
+            return true;
+        }
+
+        // Subnet Prefix on filter mask length must be the same
+        InetAddress prefix1 = getSubnetPrefix(testAddress, filterMaskLen);
+        InetAddress prefix2 = getSubnetPrefix(filterAddress, filterMaskLen);
+        return (!prefix1.equals(prefix2));
+    }
+
+    /**
+     * Returns true if the passed MAC address is all zero
+     *
+     * @param mac
+     *            the byte array representing the MAC address
+     * @return true if all MAC bytes are zero
+     */
+    public static boolean isZeroMAC(byte[] mac) {
+        for (short i = 0; i < 6; i++) {
+            if (mac[i] != 0) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Returns true if the MAC address is the broadcast MAC address and false
+     * otherwise.
+     *
+     * @param MACAddress the byte array representing the MAC address
+     * @return true if the MAC address is the broadcast MAC address, false otherwise
+     */
+    public static boolean isBroadcastMACAddr(byte[] MACAddress) {
+        if (MACAddress.length == MACAddrLengthInBytes) {
+            for (int i = 0; i < 6; i++) {
+                if (MACAddress[i] != BroadcastMACAddr[i]) {
+                    return false;
+                }
+            }
+            return true;
+        }
+
+        return false;
+    }
+    /**
+     * Returns true if the MAC address is a unicast MAC address and false
+     * otherwise.
+     *
+     * @param MACAddress the byte array representing the MAC address
+     * @return true if the MAC address is a unicast MAC address, false otherwise
+     */
+    public static boolean isUnicastMACAddr(byte[] MACAddress) {
+        if (MACAddress.length == MACAddrLengthInBytes) {
+            return (MACAddress[0] & 1) == 0;
+        }
+        return false;
+    }
+
+    /**
+     * Returns true if the MAC address is a multicast MAC address and false
+     * otherwise. Note that this explicitly returns false for the broadcast MAC
+     * address.
+     *
+     * @param MACAddress the byte array representing the MAC address
+     * @return true if the MAC address is a multicast MAC address, false otherwise
+     */
+    public static boolean isMulticastMACAddr(byte[] MACAddress) {
+        if (MACAddress.length == MACAddrLengthInBytes && !isBroadcastMACAddr(MACAddress)) {
+            return (MACAddress[0] & 1) != 0;
+        }
+        return false;
+    }
+
+    /**
+     * Returns true if the passed InetAddress contains all zero
+     *
+     * @param ip
+     *            the IP address to test
+     * @return true if the address is all zero
+     */
+    public static boolean isAny(InetAddress ip) {
+        for (byte b : ip.getAddress()) {
+            if (b != 0) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    public static boolean fieldsConflict(int field1, int field2) {
+        if ((field1 == 0) || (field2 == 0) || (field1 == field2)) {
+            return false;
+        }
+        return true;
+    }
+
+    public static InetAddress parseInetAddress(String addressString) {
+        InetAddress address = null;
+        try {
+            address = InetAddress.getByName(addressString);
+        } catch (UnknownHostException e) {
+            logger.error("", e);
+        }
+        return address;
+    }
+
+    /**
+     * Checks if the passed IPv4 address in string form is valid. The address
+     * may specify a mask at the end as "/MM"
+     *
+     * @param cidr
+     *            the v4 address as A.B.C.D/MM
+     * @return true if the passed address is a valid IPv4 address, false otherwise
+     */
+    public static boolean isIPv4AddressValid(String cidr) {
+        if (cidr == null) {
+            return false;
+        }
+
+        String values[] = cidr.split("/");
+        Pattern ipv4Pattern = Pattern
+                .compile("(([01]?\\d\\d?|2[0-4]\\d|25[0-5])\\.){3}([01]?\\d\\d?|2[0-4]\\d|25[0-5])");
+        Matcher mm = ipv4Pattern.matcher(values[0]);
+        if (!mm.matches()) {
+            return false;
+        }
+        if (values.length >= 2) {
+            int prefix = Integer.valueOf(values[1]);
+            if ((prefix < 0) || (prefix > 32)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Checks if the passed IPv6 address in string form is valid. The address
+     * may specify a mask at the end as "/MMM"
+     *
+     * @param cidr
+     *            the v6 address as A::1/MMM
+     * @return true if the passed address is a valid IPv6 address, false otherwise
+     */
+    public static boolean isIPv6AddressValid(String cidr) {
+        if (cidr == null) {
+            return false;
+        }
+
+        String values[] = cidr.split("/");
+        try {
+            // when given an IP address, InetAddress.getByName validates the ip
+            // address
+            InetAddress addr = InetAddress.getByName(values[0]);
+            if (!(addr instanceof Inet6Address)) {
+                return false;
+            }
+        } catch (UnknownHostException ex) {
+            return false;
+        }
+
+        if (values.length >= 2) {
+            int prefix;
+            try {
+                prefix = Integer.parseInt(values[1]);
+            } catch (NumberFormatException e) {
+                return false;
+            }
+            if ((prefix < 0) || (prefix > 128)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Checks if the passed IP address in string form is a valid v4 or v6
+     * address. The address may specify a mask at the end as "/MMM"
+     *
+     * @param cidr
+     *            the v4 or v6 address as IP/MMM
+     * @return true if the string is a valid IPv4 or IPv6 address, false otherwise
+     */
+    public static boolean isIPAddressValid(String cidr) {
+        return NetUtils.isIPv4AddressValid(cidr) || NetUtils.isIPv6AddressValid(cidr);
+    }
+
+    /*
+     * The following utilities are useful when you need to compare or bit-shift
+     * Java primitive type variables, which are inherently signed
+     */
+    /**
+     * Returns the unsigned value of the passed byte variable
+     *
+     * @param b
+     *            the byte value
+     * @return the int variable containing the unsigned byte value
+     */
+    public static int getUnsignedByte(byte b) {
+        return b & 0xFF;
+    }
+
+    /**
+     * Returns the unsigned value of the passed short variable
+     *
+     * @param s
+     *            the short value
+     * @return the int variable containing the unsigned short value
+     */
+    public static int getUnsignedShort(short s) {
+        return s & 0xFFFF;
+    }
+
+    /**
+     * Returns the highest v4 or v6 InetAddress
+     *
+     * @param v6
+     *            true for IPv6, false for IPv4
+     * @return The highest IPv4 or IPv6 address
+     */
+    public static InetAddress gethighestIP(boolean v6) {
+        try {
+            return (v6) ? InetAddress.getByName("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff") : InetAddress
+                    .getByName("255.255.255.255");
+        } catch (UnknownHostException e) {
+            return null;
+        }
+    }
+
+    /**
+     * Returns the broadcast MAC address
+     *
+     * @return the byte array containing the broadcast MAC address
+     */
+    public static byte[] getBroadcastMACAddr() {
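+        // Return a defensive copy so callers cannot modify the shared constant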
+        return Arrays.copyOf(BroadcastMACAddr, BroadcastMACAddr.length);
+    }
+}
diff --git a/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/Packet.java b/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/Packet.java
new file mode 100644 (file)
index 0000000..2af1852
--- /dev/null
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2013-2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.liblldp;
+
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Abstract class which represents the generic network packet object. It provides
+ * the basic methods which are common to all packets, such as serialize and
+ * deserialize
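+ *
+ * <p>A short usage sketch (assuming a concrete subclass such as {@link Ethernet}
+ * has populated its header field maps):</p>
+ * <pre>{@code
+ * Ethernet frame = new Ethernet();
+ * frame.deserialize(wireBytes, 0, wireBytes.length * NetUtils.NumBitsInAByte);
+ * byte[] onWire = frame.serialize();
+ * }</pre>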
+ */
+
+public abstract class Packet {
+    protected static final Logger logger = LoggerFactory
+            .getLogger(Packet.class);
+    // Access level granted to this packet
+    protected boolean writeAccess;
+    // When deserialized from the wire, the packet may turn out to be corrupted
+    protected boolean corrupted;
+    // The packet that encapsulate this packet
+    protected Packet parent;
+    // The packet encapsulated by this packet
+    protected Packet payload;
+    // The unparsed raw payload carried by this packet
+    protected byte[] rawPayload;
+    // Bit coordinates of packet header fields
+    protected Map<String, Pair<Integer, Integer>> hdrFieldCoordMap;
+    // Header fields values: Map<FieldName,Value>
+    protected Map<String, byte[]> hdrFieldsMap;
+    // The class of the encapsulated packet object
+    protected Class<? extends Packet> payloadClass;
+
+    public Packet() {
+        writeAccess = false;
+        corrupted = false;
+    }
+
+    public Packet(boolean writeAccess) {
+        this.writeAccess = writeAccess;
+        corrupted = false;
+    }
+
+    public Packet getParent() {
+        return parent;
+    }
+
+    public Packet getPayload() {
+        return payload;
+    }
+
+    public void setParent(Packet parent) {
+        this.parent = parent;
+    }
+
+    public void setPayload(Packet payload) {
+        this.payload = payload;
+    }
+
+    public void setHeaderField(String headerField, byte[] readValue) {
+        hdrFieldsMap.put(headerField, readValue);
+    }
+
+    /**
+     * This method deserializes the data bits obtained from the wire into the
+     * respective header and payload which are of type Packet
+     *
+     * @param data the data from the wire to deserialize
+     * @param bitOffset the bit position where the packet header starts in the
+     *        data array
+     * @param size the size of the packet in bits
+     * @return this Packet, with its header fields and payload populated
+     * @throws PacketException
+     */
+    public Packet deserialize(byte[] data, int bitOffset, int size)
+            throws PacketException {
+
+        // Deserialize the header fields one by one
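+        // (the field coordinates map is expected to preserve insertion order, so the last field marks the end of the header)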
+        int startOffset = 0, numBits = 0;
+        for (Entry<String, Pair<Integer, Integer>> pairs : hdrFieldCoordMap
+                .entrySet()) {
+            String hdrField = pairs.getKey();
+            startOffset = bitOffset + this.getfieldOffset(hdrField);
+            numBits = this.getfieldnumBits(hdrField);
+
+            byte[] hdrFieldBytes = null;
+            try {
+                hdrFieldBytes = BitBufferHelper.getBits(data, startOffset,
+                        numBits);
+            } catch (BufferException e) {
+                throw new PacketException(e.getMessage());
+            }
+
+            /*
+             * Store the raw read value, checks the payload type and set the
+             * payloadClass accordingly
+             */
+            this.setHeaderField(hdrField, hdrFieldBytes);
+
+            if (logger.isTraceEnabled()) {
+                logger.trace("{}: {}: {} (offset {} bitsize {})",
+                        new Object[] { this.getClass().getSimpleName(), hdrField,
+                        HexEncode.bytesToHexString(hdrFieldBytes),
+                        startOffset, numBits });
+            }
+        }
+
+        // Deserialize the payload now
+        int payloadStart = startOffset + numBits;
+        int payloadSize = data.length * NetUtils.NumBitsInAByte - payloadStart;
+
+        if (payloadClass != null) {
+            try {
+                payload = payloadClass.newInstance();
+            } catch (Exception e) {
+                throw new RuntimeException(
+                        "Error parsing payload for Ethernet packet", e);
+            }
+            payload.deserialize(data, payloadStart, payloadSize);
+            payload.setParent(this);
+        } else {
+            /*
+             *  The payload class was not set, it means no class for parsing
+             *  this payload is present. Let's store the raw payload if any.
+             */
+            int start = payloadStart / NetUtils.NumBitsInAByte;
+            int stop = start + payloadSize / NetUtils.NumBitsInAByte;
+            rawPayload = Arrays.copyOfRange(data, start, stop);
+        }
+
+
+        // Take care of computation that can be done only after deserialization
+        postDeserializeCustomOperation(data, payloadStart - getHeaderSize());
+
+        return this;
+    }
+
+    /**
+     * This method serializes the header and payload from the respective
+     * packet class, into a single stream of bytes to be sent on the wire
+     *
+     * @return The byte array representing the serialized Packet
+     * @throws PacketException
+     */
+    public byte[] serialize() throws PacketException {
+
+        // Acquire or compute the serialized payload
+        byte[] payloadBytes = null;
+        if (payload != null) {
+            payloadBytes = payload.serialize();
+        } else if (rawPayload != null) {
+            payloadBytes = rawPayload;
+        }
+        int payloadSize = (payloadBytes == null) ? 0 : payloadBytes.length;
+
+        // Allocate the buffer to contain the full (header + payload) packet
+        int headerSize = this.getHeaderSize() / NetUtils.NumBitsInAByte;
+        byte packetBytes[] = new byte[headerSize + payloadSize];
+        if (payloadBytes != null) {
+            System.arraycopy(payloadBytes, 0, packetBytes, headerSize, payloadSize);
+        }
+
+        // Serialize this packet header, field by field
+        for (Map.Entry<String, Pair<Integer, Integer>> pairs : hdrFieldCoordMap
+                .entrySet()) {
+            String field = pairs.getKey();
+            byte[] fieldBytes = hdrFieldsMap.get(field);
+            // Let's skip optional fields when not set
+            if (fieldBytes != null) {
+                try {
+                    BitBufferHelper.setBytes(packetBytes, fieldBytes,
+                            getfieldOffset(field), getfieldnumBits(field));
+                } catch (BufferException e) {
+                    throw new PacketException(e.getMessage());
+                }
+            }
+        }
+
+        // Perform post serialize operations (like checksum computation)
+        postSerializeCustomOperation(packetBytes);
+
+        if (logger.isTraceEnabled()) {
+            logger.trace("{}: {}", this.getClass().getSimpleName(),
+                    HexEncode.bytesToHexString(packetBytes));
+        }
+
+        return packetBytes;
+    }
+
+    /**
+     * This method gets called at the end of the serialization process. It is
+     * intended for the child packets to insert some custom data into the output
+     * byte stream which cannot be done or cannot be done efficiently during the
+     * normal Packet.serialize() path. An example is the checksum computation
+     * for IPv4
+     *
+     * @param myBytes the serialized bytes of this packet
+     * @throws PacketException
+     */
+    protected void postSerializeCustomOperation(byte[] myBytes)
+            throws PacketException {
+        // no op
+    }
+
+    /**
+     * This method re-computes the checksum of the bits received on the wire and
+     * validates it against the checksum in the bits received. Since the computation
+     * of checksum varies based on the protocol, this method is overridden.
+     * Currently only IPv4 and ICMP do checksum computation and validation. TCP
+     * and UDP need to implement these if required.
+     *
+     * @param data the byte stream representing the Ethernet frame
+     * @param startBitOffset the bit offset where the byte array corresponding to this Packet starts in the frame
+     * @throws PacketException
+     */
+    protected void postDeserializeCustomOperation(byte[] data, int startBitOffset)
+            throws PacketException {
+        // no op
+    }
+
+    /**
+     * Gets the header length in bits
+     *
+     * @return int the header length in bits
+     */
+    public int getHeaderSize() {
+        int size = 0;
+        /*
+         * We need to iterate over the fields that were read in the frame
+         * (hdrFieldsMap) not all the possible ones described in
+         * hdrFieldCoordMap. For ex, 802.1Q may or may not be there
+         */
+        for (Map.Entry<String, byte[]> fieldEntry : hdrFieldsMap.entrySet()) {
+            if (fieldEntry.getValue() != null) {
+                String field = fieldEntry.getKey();
+                size += getfieldnumBits(field);
+            }
+        }
+        return size;
+    }
+
+    /**
+     * This method fetches the start bit offset for header field specified by
+     * 'fieldname'. The offset is present in the hdrFieldCoordMap of the
+     * respective packet class
+     *
+     * @param fieldName the name of the header field
+     * @return the start bit offset of the requested field
+     */
+    public int getfieldOffset(String fieldName) {
+        return hdrFieldCoordMap.get(fieldName).getLeft();
+    }
+
+    /**
+     * This method fetches the number of bits for header field specified by
+     * 'fieldname'. The numBits are present in the hdrFieldCoordMap of the
+     * respective packet class
+     *
+     * @param fieldName the name of the header field
+     * @return the number of bits of the requested field
+     */
+    public int getfieldnumBits(String fieldName) {
+        return hdrFieldCoordMap.get(fieldName).getRight();
+    }
+
+    @Override
+    public String toString() {
+        StringBuilder ret = new StringBuilder();
+        ret.append(this.getClass().getSimpleName());
+        ret.append(": [");
+        for (String field : hdrFieldCoordMap.keySet()) {
+            byte[] value = hdrFieldsMap.get(field);
+            ret.append(field);
+            ret.append(": ");
+            ret.append(HexEncode.bytesToHexString(value));
+            ret.append(", ");
+        }
+        ret.replace(ret.length()-2, ret.length()-1, "]");
+        return ret.toString();
+    }
+
+    /**
+     * Returns the raw payload carried by this packet in case the payload was not
+     * parsed. Callers can use this method when getPayload() returns null.
+     *
+     * @return the raw payload as an array of bytes if the payload could not be parsed, null otherwise
+     */
+    public byte[] getRawPayload() {
+        return rawPayload;
+    }
+
+    /**
+     * Set a raw payload in the packet class
+     *
+     * @param payload The raw payload as byte array
+     */
+    public void setRawPayload(byte[] payload) {
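+        // Keep a defensive copy so later changes to the caller's array do not affect this packet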
+        this.rawPayload = Arrays.copyOf(payload, payload.length);
+    }
+
+    /**
+     * Return whether the deserialized packet is to be considered corrupted.
+     * This is the case when the checksum computed after reconstructing the
+     * packet received from wire is not equal to the checksum read from the
+     * stream. For Packet classes which do not have a checksum field, this
+     * function will always return false.
+     *
+     * @return true if the deserialized packet's recomputed checksum is not
+     *         equal to the packet carried checksum
+     */
+    public boolean isCorrupted() {
+        return corrupted;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        // Use a fixed seed instead of the identity-based Object.hashCode() so that equal packets hash equally
+        int result = 1;
+        result = prime * result
+                + ((this.hdrFieldsMap == null) ? 0 : hdrFieldsMap.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (obj == null || getClass() != obj.getClass()) {
+            return false;
+        }
+        Packet other = (Packet) obj;
+        if (hdrFieldsMap == other.hdrFieldsMap) {
+            return true;
+        }
+        if (hdrFieldsMap == null || other.hdrFieldsMap == null) {
+            return false;
+        }
+        // Both maps are non-null at this point; compare the header field values byte by byte
+        for (String field : hdrFieldsMap.keySet()) {
+            if (!Arrays.equals(hdrFieldsMap.get(field), other.hdrFieldsMap.get(field))) {
+                return false;
+            }
+        }
+        return true;
+    }
+}
diff --git a/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/PacketException.java b/opendaylight/commons/liblldp/src/main/java/org/opendaylight/controller/liblldp/PacketException.java
new file mode 100644 (file)
index 0000000..c69fc03
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.liblldp;
+
+/**
+ * Describes an exception that is raised when the process of serializing or
+ * deserializing a network packet/stream fails. This generally happens when the
+ * packet/stream is malformed.
+ *
+ */
+public class PacketException extends Exception {
+    private static final long serialVersionUID = 1L;
+
+    public PacketException(String message) {
+        super(message);
+    }
+}
diff --git a/opendaylight/commons/liblldp/src/test/java/org/opendaylight/controller/sal/packet/BitBufferHelperTest.java b/opendaylight/commons/liblldp/src/test/java/org/opendaylight/controller/sal/packet/BitBufferHelperTest.java
new file mode 100644 (file)
index 0000000..07fbf05
--- /dev/null
@@ -0,0 +1,693 @@
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.sal.packet;
+
+import org.junit.Assert;
+
+import org.junit.Test;
+import org.opendaylight.controller.liblldp.BitBufferHelper;
+
+public class BitBufferHelperTest {
+
+    @Test
+    public void testGetByte() {
+        byte[] data = { 100 };
+        Assert.assertTrue(BitBufferHelper.getByte(data) == 100);
+    }
+
+    @Test
+    public void testGetBits() throws Exception {
+        byte[] data = { 10, 12, 14, 20, 55, 69, 82, 97, 109, 117, 127, -50 };
+        byte[] bits;
+
+        bits = BitBufferHelper.getBits(data, 88, 8); //BYTE extraOffsetBits = extranumBits = 0
+        Assert.assertTrue(bits[0] == -50);
+
+        bits = BitBufferHelper.getBits(data, 8, 16); //Short
+        Assert.assertTrue(bits[0] == 12);
+        Assert.assertTrue(bits[1] == 14);
+
+        bits = BitBufferHelper.getBits(data, 32, 32); //Int
+        Assert.assertTrue(bits[0] == 55);
+        Assert.assertTrue(bits[1] == 69);
+        Assert.assertTrue(bits[2] == 82);
+        Assert.assertTrue(bits[3] == 97);
+
+        bits = BitBufferHelper.getBits(data, 16, 48); //Long
+        Assert.assertTrue(bits[0] == 14);
+        Assert.assertTrue(bits[1] == 20);
+        Assert.assertTrue(bits[2] == 55);
+        Assert.assertTrue(bits[3] == 69);
+        Assert.assertTrue(bits[4] == 82);
+        Assert.assertTrue(bits[5] == 97);
+
+        bits = BitBufferHelper.getBits(data, 40, 7); //BYTE extraOffsetBits = extranumBits != 0
+        Assert.assertTrue(bits[0] == 34);
+
+        bits = BitBufferHelper.getBits(data, 8, 13); //Short
+        Assert.assertTrue(bits[0] == 1);
+        Assert.assertTrue(bits[1] == -127);
+
+        bits = BitBufferHelper.getBits(data, 32, 28); //Int
+        Assert.assertTrue(bits[0] == 3);
+        Assert.assertTrue(bits[1] == 116);
+        Assert.assertTrue(bits[2] == 85);
+        Assert.assertTrue(bits[3] == 38);
+
+        bits = BitBufferHelper.getBits(data, 16, 41); //Long
+        Assert.assertTrue(bits[0] == 0);
+        Assert.assertTrue(bits[1] == 28);
+        Assert.assertTrue(bits[2] == 40);
+        Assert.assertTrue(bits[3] == 110);
+        Assert.assertTrue(bits[4] == -118);
+        Assert.assertTrue(bits[5] == -92);
+
+        bits = BitBufferHelper.getBits(data, 3, 7); //BYTE extraOffsetBits != 0; extranumBits == 0
+        Assert.assertTrue(bits[0] == 40);
+
+        bits = BitBufferHelper.getBits(data, 13, 16); //Short
+        Assert.assertTrue(bits[0] == -127);
+        Assert.assertTrue(bits[1] == -62);
+
+        bits = BitBufferHelper.getBits(data, 5, 32); //Int
+        Assert.assertTrue(bits[0] == 65);
+        Assert.assertTrue(bits[1] == -127);
+        Assert.assertTrue(bits[2] == -62);
+        Assert.assertTrue(bits[3] == -122);
+
+        bits = BitBufferHelper.getBits(data, 23, 48); //Long
+        Assert.assertTrue(bits[0] == 10);
+        Assert.assertTrue(bits[1] == 27);
+        Assert.assertTrue(bits[2] == -94);
+        Assert.assertTrue(bits[3] == -87);
+        Assert.assertTrue(bits[4] == 48);
+        Assert.assertTrue(bits[5] == -74);
+
+        bits = BitBufferHelper.getBits(data, 66, 9); //BYTE extraOffsetBits != 0; extranumBits != 0
+        Assert.assertTrue(bits[0] == 1);
+        Assert.assertTrue(bits[1] == 107);
+
+        bits = BitBufferHelper.getBits(data, 13, 15); //Short
+        Assert.assertTrue(bits[0] == 64);
+        Assert.assertTrue(bits[1] == -31);
+
+        bits = BitBufferHelper.getBits(data, 5, 29); //Int
+        Assert.assertTrue(bits[0] == 8);
+        Assert.assertTrue(bits[1] == 48);
+        Assert.assertTrue(bits[2] == 56);
+        Assert.assertTrue(bits[3] == 80);
+
+        bits = BitBufferHelper.getBits(data, 31, 43); //Long
+        Assert.assertTrue(bits[0] == 0);
+        Assert.assertTrue(bits[1] == -35);
+        Assert.assertTrue(bits[2] == 21);
+        Assert.assertTrue(bits[3] == 73);
+        Assert.assertTrue(bits[4] == -123);
+        Assert.assertTrue(bits[5] == -75);
+
+        bits = BitBufferHelper.getBits(data, 4, 12); //Short
+        Assert.assertTrue(bits[0] == 10);
+        Assert.assertTrue(bits[1] == 12);
+
+        byte[] data1 = { 0, 8 };
+        bits = BitBufferHelper.getBits(data1, 7, 9); //Short
+        Assert.assertTrue(bits[0] == 0);
+        Assert.assertTrue(bits[1] == 8);
+
+        byte[] data2 = { 2, 8 };
+        bits = BitBufferHelper.getBits(data2, 0, 7); //Short
+        Assert.assertTrue(bits[0] == 1);
+
+        bits = BitBufferHelper.getBits(data2, 7, 9); //Short
+        Assert.assertTrue(bits[0] == 0);
+        Assert.assertTrue(bits[1] == 8);
+    }
+
+    // [01101100][01100000]
+    //     [01100011]
+    @Test
+    public void testGetBytes() throws Exception {
+        byte data[] = { 108, 96, 125, -112, 5, 6, 108, 8, 9, 10, 11, 12, 13,
+                14, 15, 16, 17, 18, 19, 20, 21, 22 };
+        byte[] x;
+
+        Assert.assertTrue(BitBufferHelper.getBits(data, 0, 8)[0] == 108);
+        Assert.assertTrue(BitBufferHelper.getBits(data, 8, 8)[0] == 96);
+
+        x = BitBufferHelper.getBits(data, 0, 10);
+        Assert.assertTrue(x[0] == 1);
+        Assert.assertTrue(x[1] == -79);
+
+        x = BitBufferHelper.getBits(data, 3, 8);
+        Assert.assertTrue(x[0] == 99);
+        //Assert.assertTrue(x[1] == 97);
+
+    }
+
+    @Test
+    public void testMSBMask() {
+        int numBits = 1; //MSB
+        int mask = BitBufferHelper.getMSBMask(numBits);
+        Assert.assertTrue(mask == 128);
+
+        numBits = 8;
+        mask = BitBufferHelper.getMSBMask(numBits);
+        Assert.assertTrue(mask == 255);
+
+        numBits = 2;
+        mask = BitBufferHelper.getMSBMask(numBits);
+        Assert.assertTrue(mask == 192);
+    }
+
+    @Test
+    public void testLSBMask() {
+        int numBits = 1; //LSB
+        int mask = BitBufferHelper.getLSBMask(numBits);
+        Assert.assertTrue(mask == 1);
+
+        numBits = 3;
+        mask = BitBufferHelper.getLSBMask(numBits);
+        Assert.assertTrue(mask == 7);
+
+        numBits = 8;
+        mask = BitBufferHelper.getLSBMask(numBits);
+        Assert.assertTrue(mask == 255);
+    }
+
+    @Test
+    public void testToByteArray() {
+        short sh = Short.MAX_VALUE;
+        byte[] data_sh = new byte[Byte.SIZE / 8];
+        data_sh = BitBufferHelper.toByteArray(sh);
+        Assert.assertTrue(data_sh[0] == 127);
+        Assert.assertTrue(data_sh[1] == -1);
+
+        short sh2 = Short.MIN_VALUE;
+        byte[] data_sh2 = new byte[Byte.SIZE / 8];
+        data_sh2 = BitBufferHelper.toByteArray(sh2);
+        Assert.assertTrue(data_sh2[0] == -128);
+        Assert.assertTrue(data_sh2[1] == 0);
+
+        short sh3 = 16384;
+        byte[] data_sh3 = new byte[Byte.SIZE / 8];
+        data_sh3 = BitBufferHelper.toByteArray(sh3);
+        Assert.assertTrue(data_sh3[0] == 64);
+        Assert.assertTrue(data_sh3[1] == 0);
+
+        short sh4 = 146; //TCP headerlenflags - startoffset = 103
+        byte[] data_sh4 = new byte[Byte.SIZE / 8];
+        data_sh4 = BitBufferHelper.toByteArray(sh4);
+        Assert.assertTrue(data_sh4[0] == 0);
+        Assert.assertTrue(data_sh4[1] == -110);
+
+        short sh4_2 = 5000; //IPv4 Offset - startOffset = 51 (to 63)
+        byte[] data_sh4_2 = new byte[Byte.SIZE / 8];
+        data_sh4_2 = BitBufferHelper.toByteArray(sh4_2);
+        Assert.assertTrue(data_sh4_2[0] == 19);
+        Assert.assertTrue(data_sh4_2[1] == -120);
+
+        short sh4_3 = 5312; //numEndRestBits < numBitstoShiftBy
+        byte[] data_sh4_3 = new byte[Byte.SIZE / 8];
+        data_sh4_3 = BitBufferHelper.toByteArray(sh4_3);
+        Assert.assertTrue(data_sh4_3[0] == 20);
+        Assert.assertTrue(data_sh4_3[1] == -64);
+
+        int Int = Integer.MAX_VALUE;
+        byte[] data_Int = new byte[Integer.SIZE / 8];
+        data_Int = BitBufferHelper.toByteArray(Int);
+        Assert.assertTrue(data_Int[0] == 127);
+        Assert.assertTrue(data_Int[1] == -1);
+        Assert.assertTrue(data_Int[2] == -1);
+        Assert.assertTrue(data_Int[3] == -1);
+
+        int Int2 = Integer.MIN_VALUE;
+        byte[] data_Int2 = new byte[Integer.SIZE / 8];
+        data_Int2 = BitBufferHelper.toByteArray(Int2);
+        Assert.assertTrue(data_Int2[0] == -128);
+        Assert.assertTrue(data_Int2[1] == 0);
+        Assert.assertTrue(data_Int2[2] == 0);
+        Assert.assertTrue(data_Int2[3] == 0);
+
+        int Int3 = 1077952576;
+        byte[] data_Int3 = new byte[Integer.SIZE / 8];
+        data_Int3 = BitBufferHelper.toByteArray(Int3);
+        Assert.assertTrue(data_Int3[0] == 64);
+        Assert.assertTrue(data_Int3[1] == 64);
+        Assert.assertTrue(data_Int3[2] == 64);
+        Assert.assertTrue(data_Int3[3] == 64);
+
+        long Lng = Long.MAX_VALUE;
+        byte[] data_lng = new byte[Long.SIZE / 8];
+        data_lng = BitBufferHelper.toByteArray(Lng);
+        Assert.assertTrue(data_lng[0] == 127);
+        Assert.assertTrue(data_lng[1] == -1);
+        Assert.assertTrue(data_lng[2] == -1);
+        Assert.assertTrue(data_lng[3] == -1);
+        Assert.assertTrue(data_lng[4] == -1);
+        Assert.assertTrue(data_lng[5] == -1);
+        Assert.assertTrue(data_lng[6] == -1);
+        Assert.assertTrue(data_lng[7] == -1);
+
+        long Lng2 = Long.MIN_VALUE;
+        byte[] data_lng2 = new byte[Long.SIZE / 8];
+        data_lng2 = BitBufferHelper.toByteArray(Lng2);
+        Assert.assertTrue(data_lng2[0] == -128);
+        Assert.assertTrue(data_lng2[1] == 0);
+        Assert.assertTrue(data_lng2[2] == 0);
+        Assert.assertTrue(data_lng2[3] == 0);
+        Assert.assertTrue(data_lng2[4] == 0);
+        Assert.assertTrue(data_lng2[5] == 0);
+        Assert.assertTrue(data_lng2[6] == 0);
+        Assert.assertTrue(data_lng2[7] == 0);
+
+        byte B = Byte.MAX_VALUE;
+        byte[] data_B = new byte[Byte.SIZE / 8];
+        data_B = BitBufferHelper.toByteArray(B);
+        Assert.assertTrue(data_B[0] == 127);
+
+        byte B1 = Byte.MIN_VALUE;
+        byte[] data_B1 = new byte[Byte.SIZE / 8];
+        data_B1 = BitBufferHelper.toByteArray(B1);
+        Assert.assertTrue(data_B1[0] == -128);
+
+        byte B2 = 64;
+        byte[] data_B2 = new byte[Byte.SIZE / 8];
+        data_B2 = BitBufferHelper.toByteArray(B2);
+        Assert.assertTrue(data_B2[0] == 64);
+
+        byte B3 = 32;
+        byte[] data_B3 = new byte[Byte.SIZE / 8];
+        data_B3 = BitBufferHelper.toByteArray(B3);
+        Assert.assertTrue(data_B3[0] == 32);
+
+    }
+
+    @Test
+    public void testToByteArrayVariable() {
+        int len = 9;
+        byte[] data_sh;
+        data_sh = BitBufferHelper.toByteArray(511, len);
+        Assert.assertTrue(data_sh[0] == (byte) 255);
+        Assert.assertTrue(data_sh[1] == (byte) 128);
+
+        data_sh = BitBufferHelper.toByteArray((int) 511, len);
+        Assert.assertTrue(data_sh[0] == (byte) 255);
+        Assert.assertTrue(data_sh[1] == (byte) 128);
+
+        data_sh = BitBufferHelper.toByteArray((long) 511, len);
+        Assert.assertTrue(data_sh[0] == (byte) 255);
+        Assert.assertTrue(data_sh[1] == (byte) 128);
+    }
+
+    @Test
+    public void testToInt() {
+        byte data[] = { 1 };
+        Assert.assertTrue(BitBufferHelper.toNumber(data) == 1);
+
+        byte data2[] = { 1, 1 };
+        Assert.assertTrue(BitBufferHelper.toNumber(data2) == 257);
+
+        byte data3[] = { 1, 1, 1 };
+        Assert.assertTrue(BitBufferHelper.toNumber(data3) == 65793);
+    }
+
+    @Test
+    public void testToLongGetter() {
+        byte data[] = { 1, 1 };
+        Assert.assertTrue(BitBufferHelper.getLong(data) == 257L);
+    }
+
+    @Test
+    public void testSetByte() throws Exception {
+        byte input;
+        byte[] data = new byte[20];
+
+        input = 125;
+        BitBufferHelper.setByte(data, input, 0, Byte.SIZE);
+        Assert.assertTrue(data[0] == 125);
+
+        input = 109;
+        BitBufferHelper.setByte(data, input, 152, Byte.SIZE);
+        Assert.assertTrue(data[19] == 109);
+    }
+
+    @Test
+    public void testSetBytes() throws Exception {
+        byte[] input = { 0, 1 };
+        byte[] data = { 6, 0 };
+
+        BitBufferHelper.setBytes(data, input, 7, 9);
+        Assert.assertTrue(data[0] == 6);
+        Assert.assertTrue(data[1] == 1);
+    }
+
+    //@Test
+    //INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+    // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]*/
+    public void testInsertBits() throws Exception {
+        //CASE 1: startOffset%8 == 0 && numBits%8 == 0
+        byte inputdata[] = { 75, 110, 107, 80, 10, 12, 35, 100, 125, 65 };
+        int startOffset = 0;
+        int numBits = 8;
+
+        byte data1[] = new byte[2];
+        startOffset = 0;
+        numBits = 16;
+        BitBufferHelper.insertBits(data1, inputdata, startOffset, numBits);
+        Assert.assertTrue(data1[0] == 75);
+        Assert.assertTrue(data1[1] == 110);
+
+        byte data2[] = new byte[4];
+        startOffset = 0;
+        numBits = 32;
+        BitBufferHelper.insertBits(data2, inputdata, startOffset, numBits);
+        Assert.assertTrue(data2[0] == 75);
+        Assert.assertTrue(data2[1] == 110);
+        Assert.assertTrue(data2[2] == 107);
+        Assert.assertTrue(data2[3] == 80);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [01001011] [01101000] = {75, 104}
+        byte data10[] = new byte[2];
+        startOffset = 0;
+        numBits = 13;
+        BitBufferHelper.insertBits(data10, inputdata, startOffset, numBits);
+        Assert.assertTrue(data10[0] == 75);
+        Assert.assertTrue(data10[1] == 104);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [01001000] = {72}
+        byte data11[] = new byte[4];
+        startOffset = 8;
+        numBits = 6;
+        BitBufferHelper.insertBits(data11, inputdata, startOffset, numBits);
+        Assert.assertTrue(data11[1] == 72);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [01001011] [01101110] [01101000] = {75, 110, 105}
+        byte data12[] = new byte[4];
+        startOffset = 0;
+        numBits = 23;
+        BitBufferHelper.insertBits(data12, inputdata, startOffset, numBits);
+        Assert.assertTrue(data12[0] == 75);
+        Assert.assertTrue(data12[1] == 110);
+        Assert.assertTrue(data12[2] == 106);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [01001011] [01101110] [01100000] = {75, 110, 96}
+        byte data13[] = new byte[4];
+        startOffset = 8;
+        numBits = 20;
+        BitBufferHelper.insertBits(data13, inputdata, startOffset, numBits);
+        Assert.assertTrue(data13[1] == 75);
+        Assert.assertTrue(data13[2] == 110);
+        Assert.assertTrue(data13[3] == 96);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [01001011] [01101110] [01101011] [10100000] = {75, 110, 107, 80}
+        byte data14[] = new byte[4];
+        startOffset = 0;
+        numBits = 30;
+        BitBufferHelper.insertBits(data14, inputdata, startOffset, numBits);
+        Assert.assertTrue(data14[0] == 75);
+        Assert.assertTrue(data14[1] == 110);
+        Assert.assertTrue(data14[2] == 107);
+        Assert.assertTrue(data14[3] == 80);
+
+        //CASE 3: startOffset%8 != 0, numBits%8 = 0
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [10100000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [00001001] [11000000] = {72, 96}
+        byte data16[] = new byte[5];
+        startOffset = 3;
+        numBits = 8;
+        BitBufferHelper.insertBits(data16, inputdata, startOffset, numBits);
+        Assert.assertTrue(data16[0] == 9);
+        Assert.assertTrue(data16[1] == 96);
+        Assert.assertTrue(data16[2] == 0);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [00000000] [00000100] [10110110] [11100000] = {0, 4, -54, -96}
+        // OUTPUT: [00000100] [1011 0110] [1110 0000] = {4, -54, -96}
+
+        startOffset = 3;
+        numBits = 16;
+        byte data17[] = new byte[5];
+        BitBufferHelper.insertBits(data17, inputdata, startOffset, numBits);
+        Assert.assertTrue(data17[0] == 9);
+        Assert.assertTrue(data17[1] == 109);
+        Assert.assertTrue(data17[2] == -64);
+        Assert.assertTrue(data17[3] == 0);
+
+        // INPUT: {79, 110, 111}
+        // = [01001111] [01101110] [01101111]
+        //OUTPUT: [0000 1001] [1110 1101] [110 00000] = {9, -19, -64}
+        byte data18[] = new byte[5];
+        byte inputdata3[] = { 79, 110, 111 };
+        startOffset = 3;
+        numBits = 16;
+        BitBufferHelper.insertBits(data18, inputdata3, startOffset, numBits);
+        Assert.assertTrue(data18[0] == 9);
+        Assert.assertTrue(data18[1] == -19);
+        Assert.assertTrue(data18[2] == -64);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [00000000] [00000100] [10110110] [11100000] = {0, 4, -54, -96}
+        // OUTPUT: [0000 1001] [0110 1101] [1100 1101] [0110 1010] [0000 0001] = {9, 109, -51, 106, 0}
+
+        startOffset = 3;
+        numBits = 32;
+        byte data19[] = new byte[5];
+        BitBufferHelper.insertBits(data19, inputdata, startOffset, numBits);
+        Assert.assertTrue(data19[0] == 9);
+        Assert.assertTrue(data19[1] == 109);
+        Assert.assertTrue(data19[2] == -51);
+        Assert.assertTrue(data19[3] == 106);
+        Assert.assertTrue(data19[4] == 0);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [00000000] [00000100] [10110110] [11100000] = {0, 4, -54, -96}
+        // OUTPUT: data[4, 5, 6] = [0 010 0101] [1 011 0111] [0 000 0000] = {37, -73, 0}
+        startOffset = 33;
+        numBits = 16;
+        byte data20[] = new byte[7];
+        BitBufferHelper.insertBits(data20, inputdata, startOffset, numBits);
+        Assert.assertTrue(data20[4] == 37);
+        Assert.assertTrue(data20[5] == -73);
+        Assert.assertTrue(data20[6] == 0);
+
+        //CASE 4: extranumBits != 0 AND extraOffsetBits != 0
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [00000000] [00000100] [10110110] [11100000] = {0, 4, -54, -96}
+        // OUTPUT: [0000 1001] [0100 0000]  = {9, 96}
+        startOffset = 3;
+        numBits = 7;
+        byte data21[] = new byte[7];
+        BitBufferHelper.insertBits(data21, inputdata, startOffset, numBits);
+        Assert.assertTrue(data21[0] == 9);
+        Assert.assertTrue(data21[1] == 64);
+        Assert.assertTrue(data21[2] == 0);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [00000000] [00000100] [10110110] [11100000] = {0, 4, -54, -96}
+        // OUTPUT: data = [00000 010] [01011 011] [01110 000] = {37, -73, 0}
+        startOffset = 5;
+        numBits = 17;
+        byte data22[] = new byte[7];
+        BitBufferHelper.insertBits(data22, inputdata, startOffset, numBits);
+        Assert.assertTrue(data22[0] == 2);
+        Assert.assertTrue(data22[1] == 91);
+        Assert.assertTrue(data22[2] == 112);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [00000000] [00000100] [10110110] [11100000] = {0, 4, -54, -96}
+        // OUTPUT: [0000 1001] [0110 1101] [110 01101] [01 00000] = {9, 109, -51, 64}
+        startOffset = 3;
+        numBits = 23;
+        byte data23[] = new byte[7];
+        BitBufferHelper.insertBits(data23, inputdata, startOffset, numBits);
+        Assert.assertTrue(data23[0] == 9);
+        Assert.assertTrue(data23[1] == 109);
+        Assert.assertTrue(data23[2] == -51);
+        Assert.assertTrue(data23[3] == 64);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [00000000] [00000100] [10110110] [11100000] = {0, 4, -54, -96}
+        // OUTPUT: [0000 1001] [0110 1101]  = {9, 109}
+        startOffset = 3;
+        numBits = 13;
+        byte data24[] = new byte[7];
+        BitBufferHelper.insertBits(data24, inputdata, startOffset, numBits);
+        Assert.assertTrue(data24[0] == 9);
+        Assert.assertTrue(data24[1] == 109);
+        Assert.assertTrue(data24[2] == 0);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [00000000] [00000100] [10110110] [11100000] = {0, 4, -54, -96}
+        // OUTPUT: [0000 0100] [1011 0110] [1110 0110]  = {4, -74, -26}
+        startOffset = 4;
+        numBits = 20;
+        byte data25[] = new byte[7];
+        BitBufferHelper.insertBits(data25, inputdata, startOffset, numBits);
+        Assert.assertTrue(data25[0] == 4);
+        Assert.assertTrue(data25[1] == -74);
+        Assert.assertTrue(data25[2] == -26);
+        Assert.assertTrue(data25[3] == -0);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [00000000] [00000100] [10110110] [11100000] = {0, 4, -54, -96}
+        // OUTPUT: [0000 0010] [0101 1011]   = {0, 2, 91, 0}
+        startOffset = 13;
+        numBits = 11;
+        byte data26[] = new byte[7];
+        BitBufferHelper.insertBits(data26, inputdata, startOffset, numBits);
+        Assert.assertTrue(data26[0] == 0);
+        Assert.assertTrue(data26[1] == 2);
+        Assert.assertTrue(data26[2] == 91);
+        Assert.assertTrue(data26[3] == 0);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [00000000] [00000100] [10110110] [11100000] = {0, 4, -54, -96}
+        // OUTPUT: [000 01001] [011 01101] [110 0 0000]   = {9, 109, -64, 0}
+        startOffset = 3;
+        numBits = 17;
+        byte data27[] = new byte[7];
+        BitBufferHelper.insertBits(data27, inputdata, startOffset, numBits);
+        Assert.assertTrue(data27[0] == 9);
+        Assert.assertTrue(data27[1] == 109);
+        Assert.assertTrue(data27[2] == -64);
+        Assert.assertTrue(data27[3] == 0);
+
+        // INPUT: {75, 110, 107, 80, 10, 12, 35, 100, 125, 65} =
+        // [01001011] [01101110] [01101011] [01010000] [00001010] [00001100] [00100011] [01100100] [11111101] [01000001]
+        // OUTPUT: [00000000] [00000100] [10110110] [11100000] = {0, 4, -54, -96}
+        // OUTPUT: [00 000000] [00 000000] [00 010010] [11 011011] [10 011010] [11 010100] [0000 0000] = {0, 0, 18, -37,-102,-44,0}
+        startOffset = 18;
+        numBits = 34;
+        byte data28[] = new byte[7];
+        BitBufferHelper.insertBits(data28, inputdata, startOffset, numBits);
+        Assert.assertTrue(data28[0] == 0);
+        Assert.assertTrue(data28[1] == 0);
+        Assert.assertTrue(data28[2] == 18);
+        Assert.assertTrue(data28[3] == -37);
+        Assert.assertTrue(data28[4] == -102);
+        Assert.assertTrue(data28[5] == -44);
+        Assert.assertTrue(data28[6] == 0);
+
+    }
+
+    @Test
+    public void testGetShort() throws Exception {
+        byte data[] = new byte[2];
+        data[0] = 7;
+        data[1] = 8;
+        int length = 9; // num bits
+        Assert.assertTrue(BitBufferHelper.getShort(data, length) == 264);
+
+        data[0] = 6;
+        data[1] = 8;
+        short result = BitBufferHelper.getShort(data, length);
+        Assert.assertTrue(result == 8);
+
+        data[0] = 8;
+        data[1] = 47;
+        result = BitBufferHelper.getShort(data, length);
+        Assert.assertTrue(result == 47);
+
+        //[0000 0001] [0001 0100] [0110 0100]
+        byte[] data1 = new byte[2];
+        data1[0] = 1;
+        data1[1] = 20; //data1[2] = 100;
+        length = 15;
+        result = BitBufferHelper.getShort(data1, length);
+        Assert.assertTrue(result == 276);
+
+        byte[] data2 = new byte[2];
+        data2[0] = 64;
+        data2[1] = 99; //data2[2] = 100;
+        length = 13;
+        result = BitBufferHelper.getShort(data2, length);
+        Assert.assertTrue(result == 99);
+
+        byte[] data3 = { 100, 50 };
+        result = BitBufferHelper.getShort(data3);
+        Assert.assertTrue(result == 25650);
+    }
+
+    @Test
+    public void testToIntVarLength() throws Exception {
+        byte data[] = { (byte) 255, (byte) 128 };
+        int length = 9; // num bits
+        Assert.assertTrue(BitBufferHelper.getInt(data, length) == 384);
+
+        byte data2[] = { 0, 8 };
+        Assert.assertTrue(BitBufferHelper.getInt(data2, 9) == 8);
+
+        byte data3[] = { 1, 1, 1 };
+        Assert.assertTrue(BitBufferHelper.getInt(data3) == 65793);
+
+        byte data4[] = { 1, 1, 1 };
+        Assert.assertTrue(BitBufferHelper.getInt(data4) == 65793);
+
+        byte data5[] = { 1, 1 };
+        Assert.assertTrue(BitBufferHelper.getInt(data5) == 257);
+
+    }
+
+    @Test
+    public void testShiftBitstoLSB() {
+        byte[] data = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+
+        byte[] data2 = { 8, 9, 10 };
+        byte[] shiftedBytes2 = BitBufferHelper.shiftBitsToLSB(data2, 11);
+
+        Assert.assertTrue(shiftedBytes2[0] == 0);
+        Assert.assertTrue(shiftedBytes2[1] == 64);
+        Assert.assertTrue(shiftedBytes2[2] == 72);
+
+        byte[] shiftedBytes = BitBufferHelper.shiftBitsToLSB(data, 49);
+
+        Assert.assertTrue(shiftedBytes[0] == 0);
+        Assert.assertTrue(shiftedBytes[1] == 2);
+        Assert.assertTrue(shiftedBytes[2] == 4);
+        Assert.assertTrue(shiftedBytes[3] == 6);
+        Assert.assertTrue(shiftedBytes[4] == 8);
+        Assert.assertTrue(shiftedBytes[5] == 10);
+        Assert.assertTrue(shiftedBytes[6] == 12);
+        Assert.assertTrue(shiftedBytes[7] == 14);
+        Assert.assertTrue(shiftedBytes[8] == 16);
+        Assert.assertTrue(shiftedBytes[9] == 18);
+
+        byte[] data1 = { 1, 2, 3 };
+        byte[] shiftedBytes1 = BitBufferHelper.shiftBitsToLSB(data1, 18);
+        Assert.assertTrue(shiftedBytes1[0] == 0);
+        Assert.assertTrue(shiftedBytes1[1] == 4);
+        Assert.assertTrue(shiftedBytes1[2] == 8);
+
+    }
+
+    @Test
+    public void testShiftBitstoLSBMSB() {
+        byte[] data = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
+
+        byte[] clone = BitBufferHelper.shiftBitsToMSB(BitBufferHelper
+                .shiftBitsToLSB(data, 72), 72);
+
+        Assert.assertTrue(clone[0] == 1);
+        Assert.assertTrue(clone[1] == 2);
+        Assert.assertTrue(clone[2] == 3);
+        Assert.assertTrue(clone[3] == 4);
+        Assert.assertTrue(clone[4] == 5);
+        Assert.assertTrue(clone[5] == 6);
+        Assert.assertTrue(clone[6] == 7);
+        Assert.assertTrue(clone[7] == 8);
+        Assert.assertTrue(clone[8] == 9);
+        Assert.assertTrue(clone[9] == 0);
+    }
+
+}
diff --git a/opendaylight/commons/liblldp/src/test/java/org/opendaylight/controller/sal/packet/address/EthernetAddressTest.java b/opendaylight/commons/liblldp/src/test/java/org/opendaylight/controller/sal/packet/address/EthernetAddressTest.java
new file mode 100644 (file)
index 0000000..cfdc785
--- /dev/null
@@ -0,0 +1,114 @@
+
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+/**
+ * @file   EthernetAddressTest.java
+ *
+ * @brief  Unit Tests for EthernetAddress class
+ *
+ * Unit Tests for EthernetAddress class
+ */
+package org.opendaylight.controller.sal.packet.address;
+
+import org.junit.Assert;
+import org.junit.Test;
+import org.opendaylight.controller.liblldp.ConstructionException;
+import org.opendaylight.controller.liblldp.EthernetAddress;
+
+public class EthernetAddressTest {
+    @Test
+    public void testNonValidConstructor() {
+        @SuppressWarnings("unused")
+        EthernetAddress ea1;
+        // Null input array
+        try {
+            ea1 = new EthernetAddress((byte[]) null);
+
+            // Exception is expected; if it is NOT raised the test will fail
+            Assert.assertTrue(false);
+        } catch (ConstructionException e) {
+        }
+
+        // Array too short
+        try {
+            ea1 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0 });
+
+            // Exception is expected; if it is NOT raised the test will fail
+            Assert.assertTrue(false);
+        } catch (ConstructionException e) {
+        }
+
+        // Array too long
+        try {
+            ea1 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+                    (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x0,
+                    (byte) 0x0 });
+
+            // Exception is expected; if it is NOT raised the test will fail
+            Assert.assertTrue(false);
+        } catch (ConstructionException e) {
+        }
+    }
+
+    @Test
+    public void testEquality() {
+        EthernetAddress ea1;
+        EthernetAddress ea2;
+        try {
+            ea1 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+                    (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x1 });
+
+            ea2 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+                    (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x1 });
+            Assert.assertTrue(ea1.equals(ea2));
+        } catch (ConstructionException e) {
+            // Exception is NOT expected; if it is raised the test will fail
+            Assert.assertTrue(false);
+        }
+
+        try {
+            ea1 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+                    (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x1 });
+
+            ea2 = ea1.clone();
+            Assert.assertTrue(ea1.equals(ea2));
+        } catch (ConstructionException e) {
+            // Exception is NOT expected; if it is raised the test will fail
+            Assert.assertTrue(false);
+        }
+
+        // Check for well knowns
+        try {
+            ea1 = EthernetAddress.BROADCASTMAC;
+            ea2 = new EthernetAddress(new byte[] { (byte) 0xff, (byte) 0xff,
+                    (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff });
+            Assert.assertTrue(ea1.equals(ea2));
+        } catch (ConstructionException e) {
+            // Exception is NOT expected; if it is raised the test will fail
+            Assert.assertTrue(false);
+        }
+    }
+
+    @Test
+    public void testUnEquality() {
+        EthernetAddress ea1;
+        EthernetAddress ea2;
+        try {
+            ea1 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+                    (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x2 });
+
+            ea2 = new EthernetAddress(new byte[] { (byte) 0x0, (byte) 0x0,
+                    (byte) 0x0, (byte) 0x0, (byte) 0x0, (byte) 0x1 });
+            Assert.assertTrue(!ea1.equals(ea2));
+        } catch (ConstructionException e) {
+            // Exception is NOT expected; if it is raised the test will fail
+            Assert.assertTrue(false);
+        }
+    }
+}
index 4f678f685473f5289dd73bae023ae5a0a4c271c5..66a403560e132addee3c6ab35181c1daafd457f7 100644 (file)
     <concepts.version>0.5.2-SNAPSHOT</concepts.version>
     <concurrentlinkedhashmap.version>1.4</concurrentlinkedhashmap.version>
     <config.version>0.2.5-SNAPSHOT</config.version>
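+    <!-- Directory and file names for the initial config subsystem configuration files -->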
+    <config.configfile.directory>etc/opendaylight/karaf</config.configfile.directory>
+    <config.netty.configfile>00-netty.xml</config.netty.configfile>
+    <config.mdsal.configfile>01-mdsal.xml</config.mdsal.configfile>
+    <config.netconf.client.configfile>01-netconf.xml</config.netconf.client.configfile>
+    <config.toaster.configfile>03-toaster-sample.xml</config.toaster.configfile>
+    <config.restconf.configfile>10-rest-connector.xml</config.restconf.configfile>
+    <config.netconf.connector.configfile>99-netconf-connector.xml</config.netconf.connector.configfile>
     <configuration.implementation.version>0.4.3-SNAPSHOT</configuration.implementation.version>
     <configuration.version>0.4.3-SNAPSHOT</configuration.version>
     <connectionmanager.version>0.1.2-SNAPSHOT</connectionmanager.version>
@@ -83,6 +90,7 @@
     <!-- OpenEXI third party lib for netconf-->
 
     <exi.nagasena.version>0000.0002.0038.0</exi.nagasena.version>
+    <felix.util.version>1.6.0</felix.util.version>
     <filtervalve.version>1.4.2-SNAPSHOT</filtervalve.version>
     <findbugs.maven.plugin.version>2.4.0</findbugs.maven.plugin.version>
     <flowprogrammer.northbound.version>0.4.2-SNAPSHOT</flowprogrammer.northbound.version>
             <artifactId>akka-osgi_${scala.version}</artifactId>
             <version>${akka.version}</version>
         </dependency>
+        <dependency>
+            <groupId>com.typesafe.akka</groupId>
+            <artifactId>akka-slf4j_${scala.version}</artifactId>
+            <version>${akka.version}</version>
+        </dependency>
       <dependency>
         <groupId>commons-codec</groupId>
         <artifactId>commons-codec</artifactId>
         <artifactId>config-persister-file-xml-adapter</artifactId>
         <version>${config.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>config-persister-feature-adapter</artifactId>
+        <version>${config.version}</version>
+      </dependency>
       <dependency>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>config-persister-impl</artifactId>
         <artifactId>karaf.branding</artifactId>
         <version>${karaf.branding.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>liblldp</artifactId>
+        <version>${sal.version}</version>
+      </dependency>
       <dependency>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>logback-config</artifactId>
         <artifactId>sal-common-util</artifactId>
         <version>${mdsal.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>sal-inmemory-datastore</artifactId>
+        <version>${mdsal.version}</version>
+      </dependency>
       <dependency>
         <groupId>org.opendaylight.controller</groupId>
         <artifactId>sal-compatibility</artifactId>
         <artifactId>toaster-config</artifactId>
         <version>${mdsal.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.opendaylight.yangtools</groupId>
+        <artifactId>features-yangtools</artifactId>
+        <version>${yangtools.version}</version>
+        <classifier>features</classifier>
+        <type>xml</type>
+        <scope>runtime</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.opendaylight.controller.samples</groupId>
+        <artifactId>features-toaster</artifactId>
+        <version>${mdsal.version}</version>
+        <classifier>features</classifier>
+        <type>xml</type>
+        <scope>runtime</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>features-config-netty</artifactId>
+        <version>${config.version}</version>
+        <classifier>features</classifier>
+        <type>xml</type>
+        <scope>runtime</scope>
+      </dependency>
+      <dependency>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>features-flow</artifactId>
+        <version>${mdsal.version}</version>
+        <classifier>features</classifier>
+        <type>xml</type>
+        <scope>runtime</scope>
+      </dependency>
       <dependency>
         <groupId>org.opendaylight.controller.thirdparty</groupId>
         <artifactId>com.sun.jersey.jersey-servlet</artifactId>
       </dependency>
       <dependency>
         <groupId>org.opendaylight.controller</groupId>
-        <artifactId>config-features</artifactId>
+        <artifactId>features-config</artifactId>
         <version>${config.version}</version>
         <classifier>features</classifier>
         <type>xml</type>
       </dependency>
       <dependency>
         <groupId>org.opendaylight.controller</groupId>
-        <artifactId>features-odl-protocol-framework</artifactId>
+        <artifactId>features-protocol-framework</artifactId>
         <version>${protocol-framework.version}</version>
         <classifier>features</classifier>
         <type>xml</type>
       </dependency>
       <dependency>
         <groupId>org.opendaylight.controller</groupId>
-        <artifactId>netconf-features</artifactId>
+        <artifactId>features-netconf</artifactId>
         <version>${netconf.version}</version>
         <classifier>features</classifier>
         <type>xml</type>
       </dependency>
       <dependency>
         <groupId>org.opendaylight.controller</groupId>
-        <artifactId>config-persister-features</artifactId>
+        <artifactId>features-config-persister</artifactId>
         <version>${config.version}</version>
         <classifier>features</classifier>
         <type>xml</type>
       </dependency>
       <dependency>
         <groupId>org.opendaylight.controller</groupId>
-        <artifactId>mdsal-features</artifactId>
+        <artifactId>features-mdsal</artifactId>
         <version>${mdsal.version}</version>
         <classifier>features</classifier>
         <type>xml</type>
index 6381836af856f06c6eba19eccfaa4529da6c1194..828fcb01e11bb00a9fde10f064b93cc2cac34863 100644 (file)
@@ -7,6 +7,14 @@
  */
 package org.opendaylight.controller.config.manager.impl.osgi;
 
+import static org.opendaylight.controller.config.manager.impl.util.OsgiRegistrationUtil.registerService;
+import static org.opendaylight.controller.config.manager.impl.util.OsgiRegistrationUtil.wrap;
+
+import java.lang.management.ManagementFactory;
+import java.util.Arrays;
+import java.util.List;
+import javax.management.InstanceAlreadyExistsException;
+import javax.management.MBeanServer;
 import org.opendaylight.controller.config.manager.impl.ConfigRegistryImpl;
 import org.opendaylight.controller.config.manager.impl.jmx.ConfigRegistryJMXRegistrator;
 import org.opendaylight.controller.config.manager.impl.osgi.mapping.CodecRegistryProvider;
@@ -14,27 +22,19 @@ import org.opendaylight.controller.config.manager.impl.osgi.mapping.ModuleInfoBu
 import org.opendaylight.controller.config.manager.impl.osgi.mapping.RefreshingSCPModuleInfoRegistry;
 import org.opendaylight.controller.config.manager.impl.util.OsgiRegistrationUtil;
 import org.opendaylight.controller.config.spi.ModuleFactory;
+import org.opendaylight.yangtools.sal.binding.generator.impl.GeneratedClassLoadingStrategy;
 import org.opendaylight.yangtools.sal.binding.generator.impl.ModuleInfoBackedContext;
 import org.osgi.framework.BundleActivator;
 import org.osgi.framework.BundleContext;
 import org.osgi.util.tracker.ServiceTracker;
 
-import javax.management.InstanceAlreadyExistsException;
-import javax.management.MBeanServer;
-import java.lang.management.ManagementFactory;
-import java.util.Arrays;
-import java.util.List;
-
-import static org.opendaylight.controller.config.manager.impl.util.OsgiRegistrationUtil.registerService;
-import static org.opendaylight.controller.config.manager.impl.util.OsgiRegistrationUtil.wrap;
-
 public class ConfigManagerActivator implements BundleActivator {
     private final MBeanServer configMBeanServer = ManagementFactory.getPlatformMBeanServer();
 
     private AutoCloseable autoCloseable;
 
     @Override
-    public void start(BundleContext context) {
+    public void start(final BundleContext context) {
 
         ModuleInfoBackedContext moduleInfoBackedContext = ModuleInfoBackedContext.create();// the inner strategy is backed by thread context cl?
 
@@ -63,6 +63,7 @@ public class ConfigManagerActivator implements BundleActivator {
         bundleTracker.open();
 
         // register config registry to OSGi
+        AutoCloseable clsReg = registerService(context, moduleInfoBackedContext, GeneratedClassLoadingStrategy.class);
         AutoCloseable configRegReg = registerService(context, configRegistry, ConfigRegistryImpl.class);
 
         // register config registry to jmx
@@ -79,12 +80,12 @@ public class ConfigManagerActivator implements BundleActivator {
         serviceTracker.open();
 
         List<AutoCloseable> list = Arrays.asList(
-                codecRegistryProvider, configRegistry, wrap(bundleTracker), configRegReg, configRegistryJMXRegistrator, wrap(serviceTracker));
+                codecRegistryProvider, clsReg, configRegistry, wrap(bundleTracker), configRegReg, configRegistryJMXRegistrator, wrap(serviceTracker));
         autoCloseable = OsgiRegistrationUtil.aggregate(list);
     }
 
     @Override
-    public void stop(BundleContext context) throws Exception {
+    public void stop(final BundleContext context) throws Exception {
         autoCloseable.close();
     }
 }
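
The hunk above also registers the ModuleInfoBackedContext as a GeneratedClassLoadingStrategy service and folds that registration into the activator's aggregate AutoCloseable. The registerService/wrap helpers themselves are not part of this change; a hedged sketch of what registerService is assumed to do:

    // Sketch only; assumes OsgiRegistrationUtil.registerService behaves roughly like this.
    import org.osgi.framework.BundleContext;
    import org.osgi.framework.ServiceRegistration;

    class RegistrationSketch {
        static <T> AutoCloseable registerService(BundleContext ctx, T instance, Class<T> iface) {
            final ServiceRegistration<T> reg = ctx.registerService(iface, instance, null);
            return new AutoCloseable() {
                @Override
                public void close() {
                    // undone when the activator's aggregated AutoCloseable is closed in stop()
                    reg.unregister();
                }
            };
        }
    }
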
diff --git a/opendaylight/config/config-persister-api/src/main/java/org/opendaylight/controller/config/persist/api/ConfigPusher.java b/opendaylight/config/config-persister-api/src/main/java/org/opendaylight/controller/config/persist/api/ConfigPusher.java
new file mode 100644 (file)
index 0000000..2dade8a
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.config.persist.api;
+
+import java.util.List;
+/*
+ * The config pusher service pushes configs into the config subsystem
+ */
+public interface ConfigPusher {
+
+    /*
+     * Pushes configs into the config subsystem
+     */
+
+    public void pushConfigs(List<? extends ConfigSnapshotHolder> configs) throws InterruptedException;
+}
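
ConfigPusher is deliberately a one-method contract: an implementation takes an ordered list of snapshot holders and blocks until the config subsystem has consumed them. A hypothetical caller (illustration only; it assumes ConfigSnapshotHolder exposes just the two accessors that FeatureConfigSnapshotHolder overrides later in this change):

    // Hypothetical caller; not part of this change.
    import java.util.Collections;
    import java.util.SortedSet;
    import java.util.TreeSet;

    import org.opendaylight.controller.config.persist.api.ConfigPusher;
    import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;

    class ConfigPusherUsageSketch {
        static void pushOne(ConfigPusher pusher, final String snapshotXml) throws InterruptedException {
            ConfigSnapshotHolder holder = new ConfigSnapshotHolder() {
                @Override
                public String getConfigSnapshot() {
                    return snapshotXml;           // the netconf <data> payload as a String
                }
                @Override
                public SortedSet<String> getCapabilities() {
                    return new TreeSet<String>(); // required yang capabilities; empty for the sketch
                }
            };
            pusher.pushConfigs(Collections.singletonList(holder)); // blocks until pushed
        }
    }
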
diff --git a/opendaylight/config/config-persister-feature-adapter/pom.xml b/opendaylight/config/config-persister-feature-adapter/pom.xml
new file mode 100644 (file)
index 0000000..7412a51
--- /dev/null
@@ -0,0 +1,74 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.opendaylight.controller</groupId>
+        <artifactId>config-subsystem</artifactId>
+        <version>0.2.5-SNAPSHOT</version>
+        <relativePath>..</relativePath>
+    </parent>
+
+    <artifactId>config-persister-feature-adapter</artifactId>
+    <packaging>bundle</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.core</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.karaf.features</groupId>
+            <artifactId>org.apache.karaf.features.core</artifactId>
+            <version>${karaf.version}</version>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>config-persister-impl</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>config-persister-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.opendaylight.controller</groupId>
+            <artifactId>config-persister-directory-xml-adapter</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.felix</groupId>
+            <artifactId>org.apache.felix.utils</artifactId>
+            <version>1.6.0</version>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+           <groupId>com.google.guava</groupId>
+           <artifactId>guava</artifactId>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.felix</groupId>
+                <artifactId>maven-bundle-plugin</artifactId>
+                <extensions>true</extensions>
+                <configuration>
+                    <instructions>
+                        <Bundle-SymbolicName>${project.artifactId}</Bundle-SymbolicName>
+                        <Bundle-Version>${project.version}</Bundle-Version>
+                        <Bundle-Activator>org.opendaylight.controller.configpusherfeature.ConfigPusherFeatureActivator</Bundle-Activator>
+                        <Private-Package>
+                            org.apache.karaf.features.internal.model,
+                            org.apache.felix.utils.version,
+                            org.opendaylight.controller.configpusherfeature.internal
+                        </Private-Package>
+                    </instructions>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>
diff --git a/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/ConfigPusherFeatureActivator.java b/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/ConfigPusherFeatureActivator.java
new file mode 100644 (file)
index 0000000..ea99579
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature;
+
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
+import org.opendaylight.controller.configpusherfeature.internal.ConfigPusherCustomizer;
+import org.osgi.framework.BundleActivator;
+import org.osgi.framework.BundleContext;
+import org.osgi.util.tracker.ServiceTracker;
+
+public class ConfigPusherFeatureActivator implements BundleActivator {
+
+    BundleContext bc = null;
+    ConfigPusherCustomizer cpc = null;
+    ServiceTracker<ConfigPusher,ConfigPusher> cpst = null;
+
+    public void start(BundleContext context) throws Exception {
+        bc = context;
+        cpc = new ConfigPusherCustomizer();
+        cpst = new ServiceTracker<ConfigPusher, ConfigPusher>(bc, ConfigPusher.class.getName(), cpc);
+        cpst.open();
+    }
+
+    public void stop(BundleContext context) throws Exception {
+        if(cpst != null) {
+            cpst.close();
+            cpst = null;
+        }
+        if(cpc != null) {
+            cpc.close();
+            cpc = null;
+        }
+        bc = null;
+    }
+}
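
The activator itself does no work beyond opening a ServiceTracker for ConfigPusher with a customizer; everything downstream is created lazily as services appear. For readers less familiar with the idiom, a minimal self-contained sketch of a tracker plus customizer (the tracked Runnable type and class names are illustrative; only the OSGi calls are real):

    // Illustration of the ServiceTracker + customizer idiom used throughout this bundle.
    import org.osgi.framework.BundleContext;
    import org.osgi.framework.ServiceReference;
    import org.osgi.util.tracker.ServiceTracker;
    import org.osgi.util.tracker.ServiceTrackerCustomizer;

    class TrackerSketch {
        static ServiceTracker<Runnable, Runnable> track(final BundleContext bc) {
            ServiceTrackerCustomizer<Runnable, Runnable> customizer = new ServiceTrackerCustomizer<Runnable, Runnable>() {
                @Override
                public Runnable addingService(ServiceReference<Runnable> ref) {
                    return bc.getService(ref);   // react to the service appearing
                }
                @Override
                public void modifiedService(ServiceReference<Runnable> ref, Runnable svc) {
                    // properties changed; nothing to do in this sketch
                }
                @Override
                public void removedService(ServiceReference<Runnable> ref, Runnable svc) {
                    bc.ungetService(ref);        // react to the service going away
                }
            };
            ServiceTracker<Runnable, Runnable> tracker =
                    new ServiceTracker<Runnable, Runnable>(bc, Runnable.class.getName(), customizer);
            tracker.open();                      // start tracking; close() in the activator's stop()
            return tracker;
        }
    }
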
diff --git a/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/AbstractFeatureWrapper.java b/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/AbstractFeatureWrapper.java
new file mode 100644 (file)
index 0000000..1bf2025
--- /dev/null
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+
+import javax.xml.bind.JAXBException;
+
+import org.apache.karaf.features.BundleInfo;
+import org.apache.karaf.features.Conditional;
+import org.apache.karaf.features.ConfigFileInfo;
+import org.apache.karaf.features.Dependency;
+import org.apache.karaf.features.Feature;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/*
+ * Wrap a Feature for the purposes of extracting the FeatureConfigSnapshotHolders from
+ * its underlying ConfigFileInfo's
+ *
+ * Delegates to the contained feature and provides additional methods.
+ */
+public class AbstractFeatureWrapper implements Feature {
+    private static final Logger logger = LoggerFactory.getLogger(AbstractFeatureWrapper.class);
+    protected Feature feature = null;
+
+    protected AbstractFeatureWrapper() {
+        // prevent instantiation without Feature
+    }
+
+    /*
+     * @param f Feature to wrap
+     */
+    public AbstractFeatureWrapper(Feature f) {
+        Preconditions.checkNotNull(f,"FeatureWrapper requires non-null Feature in constructor");
+        this.feature = f;
+    }
+
+    /*
+     * Get FeatureConfigSnapshotHolders appropriate to feed to the config subsystem
+     * from the underlying Feature Config files
+     */
+    public LinkedHashSet<FeatureConfigSnapshotHolder> getFeatureConfigSnapshotHolders() throws Exception {
+        LinkedHashSet <FeatureConfigSnapshotHolder> snapShotHolders = new LinkedHashSet<FeatureConfigSnapshotHolder>();
+        for(ConfigFileInfo c: getConfigurationFiles()) {
+            try {
+                snapShotHolders.add(new FeatureConfigSnapshotHolder(c,this));
+            } catch (JAXBException e) {
+                logger.debug("{} is not a config subsystem config file",c.getFinalname());
+            }
+        }
+        return snapShotHolders;
+    }
+
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((feature == null) ? 0 : feature.hashCode());
+        return result;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        AbstractFeatureWrapper other = (AbstractFeatureWrapper) obj;
+        if (feature == null) {
+            if (other.feature != null)
+                return false;
+        } else if (!feature.equals(other.feature))
+            return false;
+        return true;
+    }
+
+    @Override
+    public String toString() {
+        return feature.getName();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getId()
+     */
+    public String getId() {
+        return feature.getId();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getName()
+     */
+    public String getName() {
+        return feature.getName();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getDescription()
+     */
+    public String getDescription() {
+        return feature.getDescription();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getDetails()
+     */
+    public String getDetails() {
+        return feature.getDetails();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getVersion()
+     */
+    public String getVersion() {
+        return feature.getVersion();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#hasVersion()
+     */
+    public boolean hasVersion() {
+        return feature.hasVersion();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getResolver()
+     */
+    public String getResolver() {
+        return feature.getResolver();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getInstall()
+     */
+    public String getInstall() {
+        return feature.getInstall();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getDependencies()
+     */
+    public List<Dependency> getDependencies() {
+        return feature.getDependencies();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getBundles()
+     */
+    public List<BundleInfo> getBundles() {
+        return feature.getBundles();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getConfigurations()
+     */
+    public Map<String, Map<String, String>> getConfigurations() {
+        return feature.getConfigurations();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getConfigurationFiles()
+     */
+    public List<ConfigFileInfo> getConfigurationFiles() {
+        return feature.getConfigurationFiles();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getConditional()
+     */
+    public List<? extends Conditional> getConditional() {
+        return feature.getConditional();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getStartLevel()
+     */
+    public int getStartLevel() {
+        return feature.getStartLevel();
+    }
+
+    /**
+     * @return
+     * @see org.apache.karaf.features.Feature#getRegion()
+     */
+    public String getRegion() {
+        return feature.getRegion();
+    }
+
+}
\ No newline at end of file
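
As the class comment says, the wrapper exists so that the config snapshots referenced by a feature's configfile entries can be extracted while the rest of the Feature API is simply delegated. A hypothetical use of it (illustration only; the Feature instance is assumed to come from Karaf):

    // Illustration only; not part of this change.
    import java.util.LinkedHashSet;

    import org.apache.karaf.features.Feature;
    import org.opendaylight.controller.configpusherfeature.internal.AbstractFeatureWrapper;
    import org.opendaylight.controller.configpusherfeature.internal.FeatureConfigSnapshotHolder;

    class WrapperUsageSketch {
        static void printSnapshots(Feature feature) throws Exception {
            AbstractFeatureWrapper wrapper = new AbstractFeatureWrapper(feature);
            LinkedHashSet<FeatureConfigSnapshotHolder> holders = wrapper.getFeatureConfigSnapshotHolders();
            for (FeatureConfigSnapshotHolder holder : holders) {
                // each holder wraps one config subsystem snapshot found in the feature's configfiles
                System.out.println(holder + " capabilities=" + holder.getCapabilities());
            }
        }
    }
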
diff --git a/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/ChildAwareFeatureWrapper.java b/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/ChildAwareFeatureWrapper.java
new file mode 100644 (file)
index 0000000..8d2ae68
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import java.util.LinkedHashSet;
+import java.util.List;
+
+import javax.xml.bind.JAXBException;
+
+import org.apache.felix.utils.version.VersionRange;
+import org.apache.felix.utils.version.VersionTable;
+import org.apache.karaf.features.Dependency;
+import org.apache.karaf.features.Feature;
+import org.apache.karaf.features.FeaturesService;
+import org.osgi.framework.Version;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+
+/*
+ * Wrap a Feature for the purposes of extracting the FeatureConfigSnapshotHolders from
+ * its underlying ConfigFileInfo's and those of its children recursively
+ *
+ * Delegates to the contained feature and provides additional methods.
+ */
+public class ChildAwareFeatureWrapper extends AbstractFeatureWrapper implements Feature {
+    private static final Logger logger = LoggerFactory.getLogger(ChildAwareFeatureWrapper.class);
+    private FeaturesService featuresService = null;
+
+    protected ChildAwareFeatureWrapper(Feature f) {
+        // Don't use without a feature service
+    }
+
+    /*
+     * @param f Feature to wrap
+     * @param s FeaturesService to look up dependencies
+     */
+    ChildAwareFeatureWrapper(Feature f, FeaturesService s) throws Exception {
+        super(s.getFeature(f.getName(), f.getVersion()));
+        Preconditions.checkNotNull(s, "FeatureWrapper requires non-null FeatureService in constructor");
+        this.featuresService = s;
+    }
+
+    protected FeaturesService getFeaturesService() {
+        return featuresService;
+    }
+
+    /*
+     * Get the child features of this feature, wrapped so that their config files
+     * (and those of their children) can be harvested recursively
+     */
+    public LinkedHashSet <? extends ChildAwareFeatureWrapper> getChildFeatures() throws Exception {
+        List<Dependency> dependencies = feature.getDependencies();
+        LinkedHashSet <ChildAwareFeatureWrapper> childFeatures = new LinkedHashSet<ChildAwareFeatureWrapper>();
+        if(dependencies != null) {
+            for(Dependency dependency: dependencies) {
+                Feature fi = extractFeatureFromDependency(dependency);
+                if(fi != null){
+                    ChildAwareFeatureWrapper wrappedFeature = new ChildAwareFeatureWrapper(fi,featuresService);
+                    childFeatures.add(wrappedFeature);
+                }
+            }
+        }
+        return childFeatures;
+    }
+
+    public LinkedHashSet<FeatureConfigSnapshotHolder> getFeatureConfigSnapshotHolders() throws Exception {
+        LinkedHashSet <FeatureConfigSnapshotHolder> snapShotHolders = new LinkedHashSet<FeatureConfigSnapshotHolder>();
+        for(ChildAwareFeatureWrapper c: getChildFeatures()) {
+            for(FeatureConfigSnapshotHolder h: c.getFeatureConfigSnapshotHolders()) {
+                FeatureConfigSnapshotHolder f;
+                try {
+                    f = new FeatureConfigSnapshotHolder(h,this);
+                    snapShotHolders.add(f);
+                } catch (JAXBException e) {
+                    logger.debug("{} is not a config subsystem config file",h.getFileInfo().getFinalname());
+                }
+            }
+        }
+        snapShotHolders.addAll(super.getFeatureConfigSnapshotHolders());
+        return snapShotHolders;
+    }
+
+    protected Feature extractFeatureFromDependency(Dependency dependency) throws Exception {
+        Feature[] features = featuresService.listFeatures();
+        VersionRange range = org.apache.karaf.features.internal.model.Feature.DEFAULT_VERSION.equals(dependency.getVersion())
+                ? VersionRange.ANY_VERSION : new VersionRange(dependency.getVersion(), true, true);
+        Feature fi = null;
+        for(Feature f: features) {
+            if (f.getName().equals(dependency.getName())) {
+                Version v = VersionTable.getVersion(f.getVersion());
+                if (range.contains(v)) {
+                    if (fi == null || VersionTable.getVersion(fi.getVersion()).compareTo(v) < 0) {
+                        fi = f;
+                        break;
+                    }
+                }
+            }
+        }
+        return fi;
+    }
+
+}
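
ChildAwareFeatureWrapper repeats the same extraction recursively over a feature's dependency tree, resolving each Dependency back to a concrete Feature through the FeaturesService. A small sketch of the resulting depth-first walk (illustration only; the children-first ordering mirrors how the snapshot holders are accumulated above):

    // Illustration only: depth-first collection of feature names, children first,
    // the wrapped feature itself last.
    import java.util.LinkedHashSet;
    import java.util.Set;

    import org.opendaylight.controller.configpusherfeature.internal.ChildAwareFeatureWrapper;

    class RecursionSketch {
        static Set<String> collectNames(ChildAwareFeatureWrapper wrapper) throws Exception {
            Set<String> names = new LinkedHashSet<String>();
            for (ChildAwareFeatureWrapper child : wrapper.getChildFeatures()) {
                names.addAll(collectNames(child));   // recurse into dependencies first
            }
            names.add(wrapper.getName());            // then record the feature itself
            return names;
        }
    }
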
diff --git a/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/ConfigFeaturesListener.java b/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/ConfigFeaturesListener.java
new file mode 100644 (file)
index 0000000..f5f1b85
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.karaf.features.FeatureEvent;
+import org.apache.karaf.features.FeaturesListener;
+import org.apache.karaf.features.FeaturesService;
+import org.apache.karaf.features.RepositoryEvent;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ConfigFeaturesListener implements FeaturesListener, AutoCloseable {
+    private static final Logger logger = LoggerFactory.getLogger(ConfigFeaturesListener.class);
+    private static final int QUEUE_SIZE = 100;
+    private BlockingQueue<FeatureEvent> queue = new LinkedBlockingQueue<FeatureEvent>(QUEUE_SIZE);
+    Thread pushingThread = null;
+
+    public ConfigFeaturesListener(ConfigPusher p, FeaturesService f) {
+        pushingThread = new Thread(new ConfigPushingRunnable(p, f, queue), "ConfigFeatureListener - ConfigPusher");
+        pushingThread.start();
+    }
+
+    @Override
+    public void featureEvent(FeatureEvent event) {
+        queue.offer(event);
+    }
+
+    @Override
+    public void repositoryEvent(RepositoryEvent event) {
+        logger.debug("Repository: " + event.getType() + " " + event.getRepository());
+    }
+
+    @Override
+    public void close() {
+        if(pushingThread != null) {
+            pushingThread.interrupt();
+            pushingThread = null;
+        }
+    }
+}
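
The listener never does work on Karaf's callback thread: featureEvent() only offers into a bounded queue, and the dedicated thread running ConfigPushingRunnable drains it. The hand-off pattern, reduced to its essentials (illustration only, with Strings standing in for FeatureEvents):

    // Illustration only: bounded producer/consumer hand-off.
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    class QueueHandOffSketch {
        private final BlockingQueue<String> queue = new LinkedBlockingQueue<String>(100);

        void onEvent(String event) {
            // offer() drops the event instead of blocking the caller when the queue is full
            queue.offer(event);
        }

        void drainLoop() throws InterruptedException {
            while (!Thread.currentThread().isInterrupted()) {
                String event = queue.take();   // blocks until an event arrives
                System.out.println("processing " + event);
            }
        }
    }
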
diff --git a/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/ConfigPusherCustomizer.java b/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/ConfigPusherCustomizer.java
new file mode 100644 (file)
index 0000000..d33a8cb
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import org.apache.karaf.features.FeaturesService;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.util.tracker.ServiceTracker;
+import org.osgi.util.tracker.ServiceTrackerCustomizer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class ConfigPusherCustomizer implements ServiceTrackerCustomizer<ConfigPusher, ConfigPusher>, AutoCloseable {
+    private static final Logger logger = LoggerFactory.getLogger(ConfigPusherCustomizer.class);
+    private ConfigFeaturesListener configFeaturesListener = null;
+    private FeatureServiceCustomizer featureServiceCustomizer = null;
+    private ServiceTracker<FeaturesService,FeaturesService> fsst = null;
+
+    @Override
+    public ConfigPusher addingService(ServiceReference<ConfigPusher> configPusherServiceReference) {
+        logger.trace("Got ConfigPusherCustomizer.addingService {}", configPusherServiceReference);
+        BundleContext bc = configPusherServiceReference.getBundle().getBundleContext();
+        ConfigPusher cpService = bc.getService(configPusherServiceReference);
+        featureServiceCustomizer = new FeatureServiceCustomizer(cpService);
+        fsst = new ServiceTracker<FeaturesService, FeaturesService>(bc, FeaturesService.class.getName(), featureServiceCustomizer);
+        fsst.open();
+        return cpService;
+    }
+
+    @Override
+    public void modifiedService(ServiceReference<ConfigPusher> configPusherServiceReference, ConfigPusher configPusher) {
+        // we don't care if the properties change
+    }
+
+    @Override
+    public void removedService(ServiceReference<ConfigPusher> configPusherServiceReference, ConfigPusher configPusher) {
+        this.close();
+    }
+
+    @Override
+    public void close() {
+        if(fsst != null) {
+            fsst.close();
+            fsst = null;
+        }
+        if(configFeaturesListener != null) {
+            configFeaturesListener.close();
+            configFeaturesListener = null;
+        }
+        if(featureServiceCustomizer != null) {
+            featureServiceCustomizer.close();
+            featureServiceCustomizer = null;
+        }
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/ConfigPushingRunnable.java b/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/ConfigPushingRunnable.java
new file mode 100644 (file)
index 0000000..06c5c92
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.karaf.features.Feature;
+import org.apache.karaf.features.FeatureEvent;
+import org.apache.karaf.features.FeatureEvent.EventType;
+import org.apache.karaf.features.FeaturesService;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.LinkedHashMultimap;
+
+public class ConfigPushingRunnable implements Runnable {
+    private static final Logger logger = LoggerFactory.getLogger(ConfigPushingRunnable.class);
+    private static final int POLL_TIME = 1;
+    private BlockingQueue<FeatureEvent> queue;
+    private FeatureConfigPusher configPusher;
+    public ConfigPushingRunnable(ConfigPusher p, FeaturesService f,BlockingQueue<FeatureEvent> q) {
+        queue = q;
+        configPusher = new FeatureConfigPusher(p, f);
+    }
+
+    @Override
+    public void run() {
+        List<Feature> toInstall = new ArrayList<Feature>();
+        FeatureEvent event;
+        boolean interrupted = false;
+        while(true) {
+            try {
+                if(!interrupted) {
+                        if(toInstall.isEmpty()) {
+                            event = queue.take();
+                        } else {
+                            event = queue.poll(POLL_TIME, TimeUnit.MILLISECONDS);
+                        }
+                        if(event != null && event.getFeature() !=null) {
+                            processFeatureEvent(event,toInstall);
+                        }
+                } else if(toInstall.isEmpty()) {
+                    logger.error("ConfigPushingRunnable - exiting");
+                    return;
+                }
+            } catch (InterruptedException e) {
+                logger.error("ConfigPushingRunnable - interupted");
+                interuppted = true;
+            } catch (Exception e) {
+                logger.error("Exception while processing features {}", e);
+            }
+        }
+    }
+
+    protected void processFeatureEvent(FeatureEvent event, List<Feature> toInstall) throws InterruptedException, Exception {
+        if(event.getType() == EventType.FeatureInstalled) {
+            toInstall.add(event.getFeature());
+            LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> result = configPusher.pushConfigs(toInstall);
+            toInstall.removeAll(result.keySet());
+        } else if(event.getType() == EventType.FeatureUninstalled) {
+            toInstall.remove(event.getFeature());
+        }
+    }
+
+    protected void logPushResult(LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> results) {
+        for(Feature f:results.keySet()) {
+            logger.info("Pushed configs for feature {} {}",f,results.get(f));
+        }
+    }
+}
diff --git a/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/FeatureConfigPusher.java b/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/FeatureConfigPusher.java
new file mode 100644 (file)
index 0000000..1c094ad
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedHashSet;
+import java.util.List;
+
+import org.apache.karaf.features.Feature;
+import org.apache.karaf.features.FeaturesService;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
+import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.LinkedHashMultimap;
+
+/*
+ * Simple class to push configs to the config subsystem from Feature's configfiles
+ */
+public class FeatureConfigPusher {
+    private static final Logger logger = LoggerFactory.getLogger(FeatureConfigPusher.class);
+    private FeaturesService featuresService = null;
+    private ConfigPusher pusher = null;
+    /*
+     * A LinkedHashSet (to preserve order and ensure uniqueness) of the pushedConfigs
+     * This is used to prevent pushing duplicate configs if a Feature is in multiple dependency
+     * chains.  Also, preserves the *original* Feature chain for which we pushed the config.
+     * (which is handy for logging).
+     */
+    LinkedHashSet<FeatureConfigSnapshotHolder> pushedConfigs = new LinkedHashSet<FeatureConfigSnapshotHolder>();
+    /*
+     * LinkedHashMultimap to track which configs we pushed for each Feature installation
+     * For future use
+     */
+    LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> feature2configs = LinkedHashMultimap.create();
+
+    /*
+     * @param p - ConfigPusher to push ConfigSnapshotHolders
+     */
+    public FeatureConfigPusher(ConfigPusher p, FeaturesService f) {
+        pusher = p;
+        featuresService = f;
+    }
+    /*
+     * Push config files from Features to config subsystem
+     * @param features - list of Features to extract config files from recursively and push
+     * to the config subsystem
+     *
+     * @return A LinkedHashMultimap of Features to the FeatureConfigSnapshotHolder actually pushed
+     * If a Feature is not in the returned LinkedHashMultimap then we couldn't push its configs
+     * (Usually because it was not yet installed)
+     */
+    public LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> pushConfigs(List<Feature> features) throws Exception, InterruptedException {
+        LinkedHashMultimap<Feature,FeatureConfigSnapshotHolder> pushedFeatures = LinkedHashMultimap.create();
+        for(Feature feature: features) {
+            LinkedHashSet<FeatureConfigSnapshotHolder> configSnapShots = pushConfig(feature);
+            if(!configSnapShots.isEmpty()) {
+                pushedFeatures.putAll(feature,configSnapShots);
+            }
+        }
+        return pushedFeatures;
+    }
+
+    private LinkedHashSet<FeatureConfigSnapshotHolder> pushConfig(Feature feature) throws Exception, InterruptedException {
+        LinkedHashSet<FeatureConfigSnapshotHolder> configs = new LinkedHashSet<FeatureConfigSnapshotHolder>();
+        if(isInstalled(feature)) {
+            ChildAwareFeatureWrapper wrappedFeature = new ChildAwareFeatureWrapper(feature,featuresService);
+            configs = wrappedFeature.getFeatureConfigSnapshotHolders();
+            if(!configs.isEmpty()) {
+                configs = pushConfig(configs);
+                feature2configs.putAll(feature, configs);
+            }
+        }
+        return configs;
+    }
+
+    private boolean isInstalled(Feature feature) {
+        List<Feature> installedFeatures = Arrays.asList(featuresService.listInstalledFeatures());
+        return installedFeatures.contains(feature);
+    }
+
+    private LinkedHashSet<FeatureConfigSnapshotHolder> pushConfig(LinkedHashSet<FeatureConfigSnapshotHolder> configs) throws InterruptedException {
+        LinkedHashSet<FeatureConfigSnapshotHolder> configsToPush = new LinkedHashSet<FeatureConfigSnapshotHolder>(configs);
+        configsToPush.removeAll(pushedConfigs);
+        if(!configsToPush.isEmpty()) {
+            pusher.pushConfigs(new ArrayList<ConfigSnapshotHolder>(configsToPush));
+            pushedConfigs.addAll(configsToPush);
+        }
+        LinkedHashSet<FeatureConfigSnapshotHolder> configsPushed = new LinkedHashSet<FeatureConfigSnapshotHolder>(pushedConfigs);
+        configsPushed.retainAll(configs);
+        return configsPushed;
+    }
+}
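
The two LinkedHashSet/LinkedHashMultimap fields are what keep the pusher idempotent: a snapshot reachable through several dependency chains is pushed only once, and the original push order is preserved. The duplicate-suppression bookkeeping in isolation (illustration only, with Strings standing in for FeatureConfigSnapshotHolders):

    // Illustration only: push each candidate at most once, preserving insertion order.
    import java.util.LinkedHashSet;
    import java.util.List;

    class DedupSketch {
        private final LinkedHashSet<String> pushedConfigs = new LinkedHashSet<String>();

        LinkedHashSet<String> push(List<String> candidates) {
            LinkedHashSet<String> toPush = new LinkedHashSet<String>(candidates);
            toPush.removeAll(pushedConfigs);   // drop anything an earlier feature already pushed
            pushedConfigs.addAll(toPush);      // remember what was pushed, in order
            return toPush;                     // what actually went to the config subsystem
        }
    }
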
diff --git a/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/FeatureConfigSnapshotHolder.java b/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/FeatureConfigSnapshotHolder.java
new file mode 100644 (file)
index 0000000..d1a92eb
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import java.io.File;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.SortedSet;
+
+import javax.xml.bind.JAXBContext;
+import javax.xml.bind.JAXBException;
+import javax.xml.bind.Unmarshaller;
+
+import org.apache.karaf.features.ConfigFileInfo;
+import org.apache.karaf.features.Feature;
+import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
+import org.opendaylight.controller.config.persist.storage.file.xml.model.ConfigSnapshot;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+/*
+ * A ConfigSnapshotHolder that can track all the additional information
+ * relevant to the fact we are getting these from a Feature.
+ *
+ * Includes tracking the 'featureChain' - a reverse-ordered list of the dependency
+ * graph of features that caused us to push this FeatureConfigSnapshotHolder.
+ * So if A -> B -> C, then the feature chain would be C -> B -> A
+ */
+public class FeatureConfigSnapshotHolder implements ConfigSnapshotHolder {
+    private ConfigSnapshot unmarshalled = null;
+    private ConfigFileInfo fileInfo = null;
+    private List<Feature> featureChain = new ArrayList<Feature>();
+
+    /*
+     * @param holder - existing FeatureConfigSnapshotHolder whose file info and original feature are copied
+     * @param feature - new Feature to append to the featureChain
+     */
+    public FeatureConfigSnapshotHolder(final FeatureConfigSnapshotHolder holder, final Feature feature) throws JAXBException {
+        this(holder.fileInfo,holder.getFeature());
+        this.featureChain.add(feature);
+    }
+
+    /*
+     * Create a FeatureConfigSnapshotHolder for a given ConfigFileInfo and record the associated
+     * feature we are creating it from.
+     * @param fileInfo - ConfigFileInfo to read into the ConfigSnapshot
+     * @param feature - Feature the ConfigFileInfo was attached to
+     */
+    public FeatureConfigSnapshotHolder(final ConfigFileInfo fileInfo, final Feature feature) throws JAXBException {
+        Preconditions.checkNotNull(fileInfo);
+        Preconditions.checkNotNull(fileInfo.getFinalname());
+        Preconditions.checkNotNull(feature);
+        this.fileInfo = fileInfo;
+        this.featureChain.add(feature);
+        JAXBContext jaxbContext = JAXBContext.newInstance(ConfigSnapshot.class);
+        Unmarshaller um = jaxbContext.createUnmarshaller();
+        File file = new File(fileInfo.getFinalname());
+        unmarshalled = ((ConfigSnapshot) um.unmarshal(file));
+    }
+    /*
+     * (non-Javadoc)
+     * @see java.lang.Object#hashCode()
+     *
+     * We really care most about the underlying ConfigSnapshot, so compute hashcode on that
+     */
+    @Override
+    public int hashCode() {
+        final int prime = 31;
+        int result = 1;
+        result = prime * result + ((unmarshalled == null || unmarshalled.getConfigSnapshot() == null) ? 0 : unmarshalled.getConfigSnapshot().hashCode());
+        return result;
+    }
+    /*
+     * (non-Javadoc)
+     * @see java.lang.Object#equals(java.lang.Object)
+     *
+     * We really care most about the underlying ConfigSnapshot, so compute equality on that
+     */
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj) {
+            return true;
+        }
+        if (obj == null) {
+            return false;
+        }
+        if (getClass() != obj.getClass()) {
+            return false;
+        }
+        FeatureConfigSnapshotHolder fcsh = (FeatureConfigSnapshotHolder)obj;
+        if(this.unmarshalled.getConfigSnapshot().equals(fcsh.unmarshalled.getConfigSnapshot())) {
+            return true;
+        }
+        return false;
+    }
+
+    @Override
+    public String toString() {
+       StringBuilder b = new StringBuilder();
+       Path p = Paths.get(fileInfo.getFinalname());
+       b.append(p.getFileName())
+           .append("(")
+           .append(getCauseFeature())
+           .append(",")
+           .append(getFeature())
+           .append(")");
+       return b.toString();
+
+    }
+
+    @Override
+    public String getConfigSnapshot() {
+        return unmarshalled.getConfigSnapshot();
+    }
+
+    @Override
+    public SortedSet<String> getCapabilities() {
+        return unmarshalled.getCapabilities();
+    }
+
+    public ConfigFileInfo getFileInfo() {
+        return fileInfo;
+    }
+
+    /*
+     * @return The original feature to which the ConfigFileInfo was attached
+     * Example:
+     * A -> B -> C, ConfigFileInfo Foo is attached to C.
+     * feature:install A
+     * thus C is the 'Feature' to which Foo was attached.
+     */
+    public Feature getFeature() {
+        return featureChain.get(0);
+    }
+
+    /*
+     * @return The dependency chain of the features that caused the ConfigFileInfo to be pushed in reverse order.
+     * Example:
+     * A -> B -> C, ConfigFileInfo Foo is attached to C.
+     * The returned list is
+     * [C,B,A]
+     */
+    public ImmutableList<Feature> getFeatureChain() {
+        return ImmutableList.copyOf(Lists.reverse(featureChain));
+    }
+
+    /*
+     * @return The feature whose installation was the root cause
+     * of this ConfigFileInfo being pushed.
+     * Example:
+     * A -> B -> C, ConfigFileInfo Foo is attached to C.
+     * feature:install A
+     * thus A is the 'Cause' of Foo being pushed.
+     */
+    public Feature getCauseFeature() {
+        return Iterables.getLast(featureChain);
+    }
+}
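
Detection of config subsystem files is done purely by unmarshalling: the constructor hands the configfile to JAXB, and a file whose root element is not a ConfigSnapshot fails with a JAXBException, which the wrappers above log at debug level and skip. A generic sketch of that step with a stand-in root class (illustration only):

    // Illustration only: SomeRoot stands in for ConfigSnapshot.
    import java.io.File;
    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.JAXBException;
    import javax.xml.bind.Unmarshaller;
    import javax.xml.bind.annotation.XmlRootElement;

    class JaxbSketch {
        @XmlRootElement(name = "snapshot")
        static class SomeRoot { }

        static SomeRoot read(File xmlFile) throws JAXBException {
            JAXBContext ctx = JAXBContext.newInstance(SomeRoot.class);
            Unmarshaller um = ctx.createUnmarshaller();
            // a file with an unexpected root element throws JAXBException here,
            // which is how non-config files get filtered out by the callers above
            return (SomeRoot) um.unmarshal(xmlFile);
        }
    }
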
diff --git a/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/FeatureServiceCustomizer.java b/opendaylight/config/config-persister-feature-adapter/src/main/java/org/opendaylight/controller/configpusherfeature/internal/FeatureServiceCustomizer.java
new file mode 100644 (file)
index 0000000..e72c827
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.configpusherfeature.internal;
+
+import org.apache.karaf.features.FeaturesListener;
+import org.apache.karaf.features.FeaturesService;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
+import org.osgi.framework.BundleContext;
+import org.osgi.framework.ServiceReference;
+import org.osgi.framework.ServiceRegistration;
+import org.osgi.util.tracker.ServiceTrackerCustomizer;
+
+public class FeatureServiceCustomizer implements ServiceTrackerCustomizer<FeaturesService, FeaturesService>, AutoCloseable {
+    private ConfigPusher configPusher = null;
+    private ConfigFeaturesListener configFeaturesListener = null;
+    private ServiceRegistration<?> registration;
+
+    FeatureServiceCustomizer(ConfigPusher c) {
+        configPusher = c;
+    }
+
+
+    @Override
+    public FeaturesService addingService(ServiceReference<FeaturesService> reference) {
+        BundleContext bc = reference.getBundle().getBundleContext();
+        FeaturesService featureService = bc.getService(reference);
+        configFeaturesListener = new ConfigFeaturesListener(configPusher,featureService);
+        registration = bc.registerService(FeaturesListener.class.getCanonicalName(), configFeaturesListener, null);
+        return featureService;
+    }
+
+    @Override
+    public void modifiedService(ServiceReference<FeaturesService> reference,
+            FeaturesService service) {
+        // we don't care if the properties change
+
+    }
+
+    @Override
+    public void removedService(ServiceReference<FeaturesService> reference,
+            FeaturesService service) {
+        close();
+    }
+
+    @Override
+    public void close() {
+        if(registration != null) {
+            registration.unregister();
+            registration = null;
+        }
+    }
+
+}
index 343d13e9c19194ad67680801639fdf52df9563b3..b8ad26116a2915634be51f430d8d5769ce2e949b 100644 (file)
@@ -23,6 +23,7 @@
     <module>config-util</module>
     <module>config-persister-api</module>
     <module>config-persister-file-xml-adapter</module>
+    <module>config-persister-feature-adapter</module>
     <module>yang-jmx-generator</module>
     <module>yang-jmx-generator-plugin</module>
     <module>yang-test</module>
index 5effbb09fc81536ed2cf176427c37f25d16e9ddf..b3c3f20ba80bb5cc829c5a208669a239ec8a1981 100644 (file)
       <type>kar</type>
       <scope>runtime</scope>
     </dependency>
-    <dependency>
-      <groupId>org.opendaylight.controller</groupId>
-      <artifactId>config-netty-features</artifactId>
-      <classifier>features</classifier>
-      <type>xml</type>
-      <scope>runtime</scope>
-    </dependency>
-
     <!-- AD-SAL Related Features -->
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
     <!-- MD-SAL Related Features -->
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>mdsal-features</artifactId>
+      <artifactId>features-mdsal</artifactId>
+      <classifier>features</classifier>
+      <type>xml</type>
+      <scope>runtime</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.opendaylight.controller</groupId>
+      <artifactId>features-flow</artifactId>
       <classifier>features</classifier>
       <type>xml</type>
       <scope>runtime</scope>
diff --git a/opendaylight/distribution/opendaylight-karaf/src/main/resources/configuration/initial/00-netty.xml b/opendaylight/distribution/opendaylight-karaf/src/main/resources/configuration/initial/00-netty.xml
deleted file mode 100644 (file)
index 2365c70..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-<snapshot>
-    <required-capabilities>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:netty?module=netty&amp;revision=2013-11-19</capability>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:netty:eventexecutor?module=netty-event-executor&amp;revision=2013-11-12</capability>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:netty:threadgroup?module=threadgroup&amp;revision=2013-11-07</capability>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:netty:timer?module=netty-timer&amp;revision=2013-11-19</capability>
-    </required-capabilities>
-    <configuration>
-    
-        <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
-            <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
-                <module>
-                    <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty:threadgroup">netty:netty-threadgroup-fixed</type>
-                    <name>global-boss-group</name>
-                </module>
-                <module>
-                    <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty:threadgroup">netty:netty-threadgroup-fixed</type>
-                    <name>global-worker-group</name>
-                </module>
-                <module>
-                    <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty:timer">netty:netty-hashed-wheel-timer</type>
-                    <name>global-timer</name>
-                </module>
-                <module>
-                    <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty:eventexecutor">netty:netty-global-event-executor</type>
-                    <name>singleton</name>
-                </module>
-            </modules>
-            
-            <services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
-                <service>
-                    <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty">netty:netty-threadgroup</type>
-                    <instance>
-                        <name>global-boss-group</name>
-                        <provider>/modules/module[type='netty-threadgroup-fixed'][name='global-boss-group']</provider>
-                    </instance>
-                    <instance>
-                        <name>global-worker-group</name>
-                        <provider>/modules/module[type='netty-threadgroup-fixed'][name='global-worker-group']</provider>
-                    </instance>
-                </service>
-                <service>
-                    <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty">netty:netty-event-executor</type>
-                    <instance>
-                        <name>global-event-executor</name>
-                        <provider>/modules/module[type='netty-global-event-executor'][name='singleton']</provider>
-                    </instance>
-                </service>
-                <service>
-                    <type xmlns:netty="urn:opendaylight:params:xml:ns:yang:controller:netty">netty:netty-timer</type>
-                    <instance>
-                        <name>global-timer</name>
-                        <provider>/modules/module[type='netty-hashed-wheel-timer'][name='global-timer']</provider>
-                    </instance>
-                </service>
-            </services>
-        </data>
-
-    </configuration>
-</snapshot>
diff --git a/opendaylight/distribution/opendaylight-karaf/src/main/resources/configuration/initial/01-md-sal.xml b/opendaylight/distribution/opendaylight-karaf/src/main/resources/configuration/initial/01-md-sal.xml
deleted file mode 100644 (file)
index 619ab06..0000000
+++ /dev/null
@@ -1,203 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- vi: set et smarttab sw=4 tabstop=4: -->
-<!--
- Copyright (c) 2013 Cisco Systems, Inc. and others.  All rights reserved.
-
- This program and the accompanying materials are made available under the
- terms of the Eclipse Public License v1.0 which accompanies this distribution,
- and is available at http://www.eclipse.org/legal/epl-v10.html
--->
-<snapshot>
-    <configuration>
-        <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
-            <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:schema-service-singleton</type>
-                    <name>yang-schema-service</name>
-                </module>
-                <!-- To enable use of new in-memory datastore and new implementations
-                     of data brokers, comment out all parts of this
-                     xml which are marked with DATA-BROKER and uncomment all parts
-                     of this xml which are marked with NEW-DATA-BROKER
-                -->
-                <!-- DATA-BROKER start-->
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:hash-map-data-store</type>
-                    <name>hash-map-data-store</name>
-                </module>
-                <!-- DATA BROKER end -->
-                <!-- NEW-DATA-BROKER start -->
-                <!--
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:dom-inmemory-data-broker</type>
-                    <name>async-data-broker</name>
-                    <schema-service>
-                        <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
-                        <name>yang-schema-service</name>
-                    </schema-service>
-                </module>
-                -->
-                <!-- NEW-DATA-BROKER end -->
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">prefix:dom-broker-impl</type>
-                    <name>dom-broker</name>
-                    <!-- DATA-BROKER start -->
-                    <data-store xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl">
-                        <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-data-store</type>
-                        <!-- to switch to the clustered data store, comment out the hash-map-data-store <name> and uncomment the cluster-data-store one -->
-                        <name>hash-map-data-store</name>
-                        <!-- <name>cluster-data-store</name> -->
-                    </data-store>
-                    <!-- DATA-BROKER end -->
-                    <!-- NEW-DATA-BROKER start -->
-                    <!--
-                    <async-data-broker>
-                        <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
-                        <name>async-data-broker</name>
-                    </async-data-broker>
-                    -->
-                    <!-- NEW-DATA-BROKER end -->
-                </module>
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-broker-impl</type>
-                    <name>binding-broker-impl</name>
-                    <notification-service xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
-                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-notification-service</type>
-                        <name>binding-notification-broker</name>
-                    </notification-service>
-                    <data-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
-                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
-                        <name>binding-data-broker</name>
-                    </data-broker>
-                </module>
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:runtime-generated-mapping</type>
-                    <name>runtime-mapping-singleton</name>
-                </module>
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-notification-broker</type>
-                    <name>binding-notification-broker</name>
-                </module>
-                <!-- DATA-BROKER start -->
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-data-broker</type>
-                    <name>binding-data-broker</name>
-                    <dom-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
-                        <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
-                        <name>dom-broker</name>
-                    </dom-broker>
-                    <mapping-service xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
-                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding:binding-dom-mapping-service</type>
-                        <name>runtime-mapping-singleton</name>
-                    </mapping-service>
-                </module>
-                <!-- DATA-BROKER end -->
-                <!-- NEW-DATA-BROKER start -->
-                <!--
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">prefix:binding-data-compatible-broker</type>
-                    <name>binding-data-broker</name>
-                    <dom-async-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
-                        <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
-                        <name>dom-broker</name>
-                    </dom-async-broker>
-                    <binding-mapping-service xmlns="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">
-                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding:binding-dom-mapping-service</type>
-                        <name>runtime-mapping-singleton</name>
-                    </binding-mapping-service>
-                </module>
-                -->
-                <!-- NEW-DATA-BROKER end -->
-            </modules>
-            <services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
-                <service>
-                    <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
-                    <instance>
-                        <name>yang-schema-service</name>
-                        <provider>/modules/module[type='schema-service-singleton'][name='yang-schema-service']</provider>
-                    </instance>
-                </service>
-                <service>
-                    <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-notification-service</type>
-                    <instance>
-                        <name>binding-notification-broker</name>
-                        <provider>/modules/module[type='binding-notification-broker'][name='binding-notification-broker']</provider>
-                    </instance>
-                </service>
-                <!-- DATA-BROKER start -->
-                <service>
-                    <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-data-store</type>
-                    <instance>
-                        <name>hash-map-data-store</name>
-                        <provider>/modules/module[type='hash-map-data-store'][name='hash-map-data-store']</provider>
-                    </instance>
-                </service>
-                <!-- DATA-BROKER end -->
-                <!-- NEW-DATA-BROKER start -->
-                <!--
-                <service>
-                    <type  xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
-                    <instance>
-                        <name>async-data-broker</name>
-                        <provider>/modules/module[type='dom-inmemory-data-broker'][name='async-data-broker']</provider>
-                    </instance>
-                </service>
-                -->
-                <!-- NEW-DATA-BROKER end -->
-                <service>
-                    <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-broker-osgi-registry</type>
-                    <instance>
-                        <name>binding-osgi-broker</name>
-                        <provider>/modules/module[type='binding-broker-impl'][name='binding-broker-impl']</provider>
-                    </instance>
-                </service>
-                <service>
-                    <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
-                    <instance>
-                        <name>binding-rpc-broker</name>
-                        <provider>/modules/module[type='binding-broker-impl'][name='binding-broker-impl']</provider>
-                    </instance>
-                </service>
-                <service>
-                    <type xmlns:binding-impl="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding-impl:binding-dom-mapping-service</type>
-                    <instance>
-                        <name>runtime-mapping-singleton</name>
-                        <provider>/modules/module[type='runtime-generated-mapping'][name='runtime-mapping-singleton']</provider>
-                    </instance>
-                </service>
-                <service>
-                <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
-                    <instance>
-                        <name>dom-broker</name>
-                        <provider>/modules/module[type='dom-broker-impl'][name='dom-broker']</provider>
-                    </instance>
-                </service>
-
-                <service>
-                    <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
-                    <instance>
-                        <name>binding-data-broker</name>
-                        <!--  DATA-BROKER start -->
-                        <provider>/modules/module[type='binding-data-broker'][name='binding-data-broker']</provider>
-                        <!--  DATA-BROKER end -->
-                        <!-- NEW-DATA-BROKER start -->
-                        <!--
-                        <provider>/modules/module[type='binding-data-compatible-broker'][name='binding-data-broker']</provider>
-                        -->
-                        <!--  NEW-DATA-BROKER end -->
-                    </instance>
-                </service>
-
-            </services>
-        </data>
-    </configuration>
-    <required-capabilities>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:netty:eventexecutor?module=netty-event-executor&amp;revision=2013-11-12</capability>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:threadpool?module=threadpool&amp;revision=2013-04-09</capability>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&amp;revision=2013-10-28</capability>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom?module=opendaylight-md-sal-dom&amp;revision=2013-10-28</capability>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl?module=opendaylight-sal-binding-broker-impl&amp;revision=2013-10-28</capability>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom:impl?module=opendaylight-sal-dom-broker-impl&amp;revision=2013-10-28</capability>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:common?module=opendaylight-md-sal-common&amp;revision=2013-10-28</capability>
-    </required-capabilities>
-</snapshot>
diff --git a/opendaylight/distribution/opendaylight-karaf/src/main/resources/configuration/initial/03-toaster-sample.xml b/opendaylight/distribution/opendaylight-karaf/src/main/resources/configuration/initial/03-toaster-sample.xml
deleted file mode 100644 (file)
index c481485..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-<snapshot>
-    <configuration>
-        <data xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
-            <modules xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:toaster-provider:impl">
-                        prefix:toaster-provider-impl
-                    </type>
-                    <name>toaster-provider-impl</name>
-
-                    <rpc-registry>
-                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
-                        <name>binding-rpc-broker</name>
-                    </rpc-registry>
-
-                    <notification-service>
-                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">
-                            binding:binding-notification-service
-                        </type>
-                        <name>binding-notification-broker</name>
-                    </notification-service>
-                </module>
-
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:toaster-consumer:impl">
-                        prefix:toaster-consumer-impl
-                    </type>
-                    <name>toaster-consumer-impl</name>
-
-                    <rpc-registry>
-                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-rpc-registry</type>
-                        <name>binding-rpc-broker</name>
-                    </rpc-registry>
-
-                    <notification-service>
-                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">
-                            binding:binding-notification-service
-                        </type>
-                        <name>binding-notification-broker</name>
-                    </notification-service>
-                </module>
-            </modules>
-
-            <services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
-                <service>
-                    <type xmlns:toaster="urn:opendaylight:params:xml:ns:yang:controller:config:toaster-provider">toaster:toaster-provider</type>
-                    <instance>
-                        <name>toaster-provider</name>
-                        <provider>/modules/module[type='toaster-provider-impl'][name='toaster-provider-impl']</provider>
-                    </instance>
-                </service>
-                <service>
-                    <type xmlns:toaster="urn:opendaylight:params:xml:ns:yang:controller:config:toaster-consumer">toaster:toaster-consumer</type>
-                    <instance>
-                        <name>toaster-consumer</name>
-                        <provider>/modules/module[type='toaster-consumer-impl'][name='toaster-consumer-impl']</provider>
-                    </instance>
-                </service>
-            </services>
-        </data>
-
-    </configuration>
-
-    <required-capabilities>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding?module=opendaylight-md-sal-binding&amp;revision=2013-10-28</capability>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:config:toaster-consumer?module=toaster-consumer&amp;revision=2014-01-31</capability>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:config:toaster-consumer:impl?module=toaster-consumer-impl&amp;revision=2014-01-31</capability>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:config:toaster-provider?module=toaster-provider&amp;revision=2014-01-31</capability>
-        <capability>urn:opendaylight:params:xml:ns:yang:controller:config:toaster-provider:impl?module=toaster-provider-impl&amp;revision=2014-01-31</capability>
-    </required-capabilities>
-
-</snapshot>
-
index d1a5dcc416cc190dd3dabd80fdc5e8963a6af9a3..ed659bf60347662994032ae061c3e680b13069f6 100644 (file)
@@ -2,7 +2,7 @@
 
   <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
     <encoder>
-      <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} - %msg%n</pattern>
+      <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} %X{akkaSource} - %msg%n</pattern>
     </encoder>
   </appender>
   <appender name="opendaylight.log" class="ch.qos.logback.core.rolling.RollingFileAppender">
   <!-- Web modules -->
   <logger name="org.opendaylight.controller.web" level="INFO"/>
 
+  <!-- Clustering -->
+  <logger name="org.opendaylight.controller.cluster" level="INFO"/>
+  <logger name="org.opendaylight.controller.cluster.datastore.node" level="INFO"/>
+
   <!--
        Unsynchronized controller startup causes models to crop up in random
        order, which results in temporary inability to fully resolve a model,
index 6c1ca421c257ab81807c20f4a939d556cb5338c6..e9a6992521250d612c1924e4f0fa46548f63d741 100644 (file)
@@ -25,18 +25,11 @@ netconf.ssh.port=1830
 netconf.ssh.pk.path = ./configuration/RSA.pk
 
 
-netconf.config.persister.active=1,2
-# read startup configuration
-netconf.config.persister.1.storageAdapterClass=org.opendaylight.controller.config.persist.storage.directory.xml.XmlDirectoryStorageAdapter
-netconf.config.persister.1.properties.directoryStorage=configuration/initial/
-
-# include only xml files, files with other extensions will be skipped, multiple extensions are permitted e.g. netconf.config.persister.1.properties.includeExtensions=xml,cfg,config
-netconf.config.persister.1.properties.includeExtensions=xml
-netconf.config.persister.1.readonly=true
-
-netconf.config.persister.2.storageAdapterClass=org.opendaylight.controller.config.persist.storage.file.xml.XmlFileStorageAdapter
-netconf.config.persister.2.properties.fileStorage=configuration/current/controller.currentconfig.xml
-netconf.config.persister.2.properties.numberOfBackups=1
+netconf.config.persister.active=1
+
+netconf.config.persister.1.storageAdapterClass=org.opendaylight.controller.config.persist.storage.file.xml.XmlFileStorageAdapter
+netconf.config.persister.1.properties.fileStorage=etc/opendaylight/current/controller.currentconfig.xml
+netconf.config.persister.1.properties.numberOfBackups=1
 
 # logback configuration
 logback.configurationFile=configuration/logback.xml
index e91da89970b381a5127dc99bf9027ea4969765db..a98956e98d41043a4e373e40e65799d38faa5f77 100644 (file)
@@ -180,7 +180,8 @@ jre-1.6= \
  org.w3c.dom.xpath, \
  org.xml.sax, \
  org.xml.sax.ext, \
- org.xml.sax.helpers
+ org.xml.sax.helpers, \
+ javax.annotation.processing
 
 # Standard package set.  Note that:
 #   - javax.transaction* is exported with a mandatory attribute
@@ -341,7 +342,8 @@ jre-1.7= \
  org.w3c.dom.xpath, \
  org.xml.sax, \
  org.xml.sax.ext, \
- org.xml.sax.helpers
+ org.xml.sax.helpers, \
+ javax.annotation.processing
 
 jre-1.8= \
  javax.accessibility, \
@@ -500,4 +502,5 @@ jre-1.8= \
  org.w3c.dom.xpath, \
  org.xml.sax, \
  org.xml.sax.ext, \
- org.xml.sax.helpers
+ org.xml.sax.helpers, \
+ javax.annotation.processing
index 7ab56e6d03d63071216189c716f3022b6e834967..969ecc2cbed41054fde61fb8aecfe8ae940ed95d 100644 (file)
           <groupId>org.opendaylight.controller.md</groupId>
           <artifactId>topology-lldp-discovery</artifactId>
         </dependency>
+        <dependency>
+          <groupId>org.opendaylight.controller</groupId>
+          <artifactId>liblldp</artifactId>
+        </dependency>
         <dependency>
           <groupId>org.opendaylight.controller.md</groupId>
           <artifactId>topology-manager</artifactId>
index 94a3702fdbc56d70073d593f6ed7b50a4a35d46a..b73244bc0a4c12cedd00121902c9a6246c5d3d6b 100644 (file)
@@ -2,7 +2,7 @@
 
   <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
     <encoder>
-      <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} - %msg%n</pattern>
+      <pattern>%date{"yyyy-MM-dd HH:mm:ss.SSS z"} [%thread] %-5level %logger{36} %X{akkaSource} - %msg%n</pattern>
     </encoder>
   </appender>
   <appender name="opendaylight.log" class="ch.qos.logback.core.rolling.RollingFileAppender">
   <!-- Web modules -->
   <logger name="org.opendaylight.controller.web" level="INFO"/>
 
+  <!-- Clustering -->
+  <logger name="org.opendaylight.controller.cluster" level="INFO"/>
+  <logger name="org.opendaylight.controller.cluster.datastore.node" level="INFO"/>
+
   <!--
        Unsynchronized controller startup causes models to crop up in random
        order, which results in temporary inability to fully resolve a model,
index 35a77662b542cb18333723f174d762328057ff48..c58f6cb6abe414ec1778943a5d7bb0563920ca28 100644 (file)
                     </schema-service>
                 </module>
 
-                <!-- DISTRIBUTED_DATA_STORE -->
-                <!-- Enable the following modules if you want to use the Distributed Data Store instead of the InMemoryDataStore -->
-                <!--
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:distributed-datastore-provider">prefix:distributed-operational-datastore-provider</type>
-                    <name>distributed-operational-store-module</name>
-                    <operational-schema-service>
-                        <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
-                        <name>yang-schema-service</name>
-                    </operational-schema-service>
-                </module>
-
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:distributed-datastore-provider">prefix:distributed-config-datastore-provider</type>
-                    <name>distributed-config-store-module</name>
-                    <config-schema-service>
-                        <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
-                        <name>yang-schema-service</name>
-                    </config-schema-service>
-                </module>
-                -->
-
                 <module>
                     <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:inmemory-datastore-provider">prefix:inmemory-operational-datastore-provider</type>
                     <name>operational-store-service</name>
                    <config-data-store>
                         <type xmlns:config-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:config-dom-store">config-dom-store-spi:config-dom-datastore</type>
                         <name>config-store-service</name>
-                        <!-- DISTRIBUTED_DATA_STORE -->
-                        <!--
-                        <name>distributed-config-store-service</name>
-                        -->
                     </config-data-store>
 
                     <operational-data-store>
                         <type xmlns:operational-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:operational-dom-store">operational-dom-store-spi:operational-dom-datastore</type>
                         <name>operational-store-service</name>
-                        <!-- DISTRIBUTED_DATA_STORE -->
-                        <!--
-                        <name>distributed-operational-store-service</name>
-                        -->
-
                     </operational-data-store>
                 </module>
                 <module>
                         </binding-mapping-service>
                     </binding-forwarded-data-broker>
                 </module>
-                <!-- Cluster RPC -->
-                <!-- Enable the following module if you want to use remote rpc connector
-                <module>
-                    <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:remote-rpc-connector">prefix:remote-rpc-connector</type>
-                    <name>remote-rpc-connector</name>
-                    <dom-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:config:remote-rpc-connector">
-                        <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
-                        <name>dom-broker</name>
-                    </dom-broker>
-                </module>
-                -->
             </modules>
             <services xmlns="urn:opendaylight:params:xml:ns:yang:controller:config">
                     <service>
                         </instance>
                     </service>
 
-                <!-- DISTRIBUTED_DATA_STORE -->
-                <!-- Enable the following if you want to use the Distributed Data Store instead of the InMemory Data Store -->
-                <!-- Note that you MUST delete the InMemoryDataStore related services which provide config-dom-datastore and operational-dom-datastore -->
-                <!--
-                <service>
-                    <type xmlns:config-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:config-dom-store">config-dom-store-spi:config-dom-datastore</type>
-                    <instance>
-                        <name>distributed-config-store-service</name>
-                        <provider>/modules/module[type='distributed-config-datastore-provider'][name='distributed-config-store-module']</provider>
-                    </instance>
-                </service>
-                <service>
-                    <type xmlns:operational-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:operational-dom-store">operational-dom-store-spi:operational-dom-datastore</type>
-                    <instance>
-                        <name>distributed-operational-store-service</name>
-                        <provider>/modules/module[type='distributed-operational-datastore-provider'][name='distributed-operational-store-module']</provider>
-                    </instance>
-                </service>
-                -->
-
-                <!-- DISTRIBUTED_DATA_STORE -->
-                <!-- Delete the following two services (config-store-service and operational-store-service) if you want to use the distributed data store instead -->
                 <service>
                     <type xmlns:config-dom-store-spi="urn:opendaylight:params:xml:ns:yang:controller:md:sal:core:spi:config-dom-store">config-dom-store-spi:config-dom-datastore</type>
                     <instance>
index 0a979d24eef2a26d60cb6168c18d84815037cf18..ae8b6fe8e3aadc23a33cf57531a43ff9cefed8d4 100644 (file)
@@ -295,6 +295,22 @@ public abstract class RaftActor extends UntypedPersistentActor {
         return currentBehavior.state();
     }
 
+    protected ReplicatedLogEntry getLastLogEntry() {
+        return replicatedLog.last();
+    }
+
+    protected Long getCurrentTerm(){
+        return context.getTermInformation().getCurrentTerm();
+    }
+
+    protected Long getCommitIndex(){
+        return context.getCommitIndex();
+    }
+
+    protected Long getLastApplied(){
+        return context.getLastApplied();
+    }
+
     /**
      * setPeerAddress sets the address of a known peer at a later time.
      * <p>
@@ -602,7 +618,7 @@ public abstract class RaftActor extends UntypedPersistentActor {
         }
 
         @Override public void update(long currentTerm, String votedFor) {
-            LOG.info("Set currentTerm={}, votedFor={}", currentTerm, votedFor);
+            LOG.debug("Set currentTerm={}, votedFor={}", currentTerm, votedFor);
 
             this.currentTerm = currentTerm;
             this.votedFor = votedFor;
index 0a553b40fd59aab555f258f897a2154830afd1c8..251a13d583ec444ac4ca0c1cc028831feeb48958 100644 (file)
@@ -127,6 +127,9 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
     protected RaftState requestVote(ActorRef sender,
         RequestVote requestVote) {
 
+
+        context.getLogger().debug(requestVote.toString());
+
         boolean grantVote = false;
 
         //  Reply false if term < currentTerm (§5.1)
@@ -301,7 +304,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
      *
      * @param index a log index that is known to be committed
      */
-    protected void applyLogToStateMachine(long index) {
+    protected void applyLogToStateMachine(final long index) {
         // Now maybe we apply to the state machine
         for (long i = context.getLastApplied() + 1;
              i < index + 1; i++) {
@@ -326,6 +329,7 @@ public abstract class AbstractRaftActorBehavior implements RaftActorBehavior {
         }
         // Send a local message to the local RaftActor (it's derived class to be
         // specific to apply the log to it's index)
+        context.getLogger().debug("Setting last applied to {}", index);
         context.setLastApplied(index);
     }
 
index c125bd32b60a5c5d714ea13e7008417f904d88dc..bb1927ef231949bd320d0cf060d6a9a8018829bb 100644 (file)
@@ -81,7 +81,7 @@ public class Candidate extends AbstractRaftActorBehavior {
     @Override protected RaftState handleAppendEntries(ActorRef sender,
         AppendEntries appendEntries) {
 
-        context.getLogger().info("Candidate: Received {}", appendEntries.toString());
+        context.getLogger().debug(appendEntries.toString());
 
         return state();
     }
index c8cd41dfa1883a1c7d1d43d2f7697b0ff988ab38..54e0494b9da65305729afcf00686e3102c31dd00 100644 (file)
@@ -42,7 +42,7 @@ public class Follower extends AbstractRaftActorBehavior {
 
         if(appendEntries.getEntries() != null && appendEntries.getEntries().size() > 0) {
             context.getLogger()
-                .info("Follower: Received {}", appendEntries.toString());
+                .debug(appendEntries.toString());
         }
 
         // TODO : Refactor this method into a bunch of smaller methods
index a50666233c31f30b2e94cbf4c49d53a95cca93f4..234f9db664e4d43e833e4e354e3d3045094dd381 100644 (file)
@@ -19,7 +19,6 @@ import org.opendaylight.controller.cluster.raft.FollowerLogInformationImpl;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
 import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
-import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
 import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
 import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
 import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
@@ -121,7 +120,7 @@ public class Leader extends AbstractRaftActorBehavior {
     @Override protected RaftState handleAppendEntries(ActorRef sender,
         AppendEntries appendEntries) {
 
-        context.getLogger().info("Leader: Received {}", appendEntries.toString());
+        context.getLogger().debug(appendEntries.toString());
 
         return state();
     }
@@ -131,7 +130,7 @@ public class Leader extends AbstractRaftActorBehavior {
 
         if(! appendEntriesReply.isSuccess()) {
             context.getLogger()
-                .info("Leader: Received {}", appendEntriesReply.toString());
+                .debug(appendEntriesReply.toString());
         }
 
         // Update the FollowerLogInformation
@@ -264,26 +263,18 @@ public class Leader extends AbstractRaftActorBehavior {
 
         context.getLogger().debug("Replicate message " + logIndex);
 
+        // Create a tracker entry; we will use it later to notify the
+        // client actor
+        trackerList.add(
+            new ClientRequestTrackerImpl(replicate.getClientActor(),
+                replicate.getIdentifier(),
+                logIndex)
+        );
+
         if (followers.size() == 0) {
-            context.setCommitIndex(
-                replicate.getReplicatedLogEntry().getIndex());
-
-            context.getActor()
-                .tell(new ApplyState(replicate.getClientActor(),
-                        replicate.getIdentifier(),
-                        replicate.getReplicatedLogEntry()),
-                    context.getActor()
-                );
+            context.setCommitIndex(logIndex);
+            applyLogToStateMachine(logIndex);
         } else {
-
-            // Create a tracker entry we will use this later to notify the
-            // client actor
-            trackerList.add(
-                new ClientRequestTrackerImpl(replicate.getClientActor(),
-                    replicate.getIdentifier(),
-                    logIndex)
-            );
-
             sendAppendEntries();
         }
     }
@@ -303,12 +294,7 @@ public class Leader extends AbstractRaftActorBehavior {
                 List<ReplicatedLogEntry> entries = Collections.emptyList();
 
                 if (context.getReplicatedLog().isPresent(nextIndex)) {
-                    // TODO: Instead of sending all entries from nextIndex
-                    // only send a fixed number of entries to each follower
-                    // This is to avoid the situation where there are a lot of
-                    // entries to install for a fresh follower or to a follower
-                    // that has fallen too far behind with the log but yet is not
-                    // eligible to receive a snapshot
+                    // FIXME : Sending one entry at a time
                     entries =
                         context.getReplicatedLog().getFrom(nextIndex, 1);
                 }
index d33b33925b5e7aba358152ff6067e4842fec3b71..17c22a134a9a7f26e08998930b2609b128f40c21 100644 (file)
@@ -8,6 +8,7 @@ import org.junit.Test;
 import org.opendaylight.controller.cluster.raft.MockRaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftActorContext;
 import org.opendaylight.controller.cluster.raft.RaftState;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogImplEntry;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplyState;
 import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
 import org.opendaylight.controller.cluster.raft.base.messages.SendHeartBeat;
@@ -154,18 +155,25 @@ public class LeaderTest extends AbstractRaftActorBehaviorTest {
                     MockRaftActorContext actorContext =
                         new MockRaftActorContext("test", getSystem(), raftActor);
 
+                    actorContext.getReplicatedLog().removeFrom(0);
+
+                    actorContext.getReplicatedLog().append(new ReplicatedLogImplEntry(0, 1,
+                        new MockRaftActorContext.MockPayload("foo")));
+
+                    ReplicatedLogImplEntry entry =
+                        new ReplicatedLogImplEntry(1, 1,
+                            new MockRaftActorContext.MockPayload("foo"));
+
+                    actorContext.getReplicatedLog().append(entry);
+
                     Leader leader = new Leader(actorContext);
                     RaftState raftState = leader
-                        .handleMessage(senderActor, new Replicate(null, "state-id",
-                            new MockRaftActorContext.MockReplicatedLogEntry(1,
-                                100,
-                                new MockRaftActorContext.MockPayload("foo"))
-                        ));
+                        .handleMessage(senderActor, new Replicate(null, "state-id",entry));
 
                     // State should not change
                     assertEquals(RaftState.Leader, raftState);
 
-                    assertEquals(100, actorContext.getCommitIndex());
+                    assertEquals(1, actorContext.getCommitIndex());
 
                     final String out =
                         new ExpectMsg<String>(duration("1 seconds"),
index 5ced7bae9fd3459b494a50013e297aacd8b3425f..8fbc118a16efcc59b74b4016743a8ff87ee24f7a 100644 (file)
@@ -7,22 +7,19 @@
  */
 package org.opendaylight.controller.md.sal.binding.impl;
 
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
 import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
 import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationOperation;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
-import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Optional;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
 
 public class AbstractReadWriteTransaction extends AbstractWriteTransaction<DOMDataReadWriteTransaction> {
 
@@ -50,15 +47,15 @@ public class AbstractReadWriteTransaction extends AbstractWriteTransaction<DOMDa
             org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier currentPath = org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.create(
                     currentArguments);
 
-            final Optional<NormalizedNode<?, ?>> d;
+            final Boolean exists;
             try {
-                d = getDelegate().read(store, currentPath).get();
-            } catch (InterruptedException | ExecutionException e) {
+                exists = getDelegate().exists(store, currentPath).checkedGet();
+            } catch (ReadFailedException e) {
                 LOG.error("Failed to read pre-existing data from store {} path {}", store, currentPath, e);
                 throw new IllegalStateException("Failed to read pre-existing data", e);
             }
 
-            if (!d.isPresent() && iterator.hasNext()) {
+            if (!exists && iterator.hasNext()) {
                 getDelegate().merge(store, currentPath, currentOp.createDefault(currentArg));
             }
         }
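
The hunk above switches ensureParentsByMerge() from reading the full node to the exists() presence check before merging in missing ancestors. Below is a self-contained sketch of that pattern, using a plain in-memory map as a hypothetical stand-in for the datastore transaction; the class, field and sample path names are invented for illustration and are not part of the patch.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class EnsureParentsByMergeSketch {

        // Hypothetical stand-in for a datastore transaction: path string -> data.
        private final Map<String, String> store = new HashMap<>();

        // Mirrors the idea of ensureParentsByMerge(): for every ancestor of 'path'
        // that is not present yet, merge in an empty placeholder; the final
        // segment itself is left to the caller's own put/merge.
        void ensureParentsByMerge(String path) {
            List<String> current = new ArrayList<>();
            String[] segments = path.split("/");
            for (int i = 1; i < segments.length - 1; i++) {   // skip leading "" and the last segment
                current.add(segments[i]);
                String ancestor = "/" + String.join("/", current);
                boolean exists = store.containsKey(ancestor);  // analogous to exists(store, currentPath).checkedGet()
                if (!exists) {
                    store.put(ancestor, "{}");                 // analogous to merge(store, currentPath, createDefault(...))
                }
            }
        }

        public static void main(String[] args) {
            EnsureParentsByMergeSketch tx = new EnsureParentsByMergeSketch();
            tx.ensureParentsByMerge("/top/top-level-list/foo");
            System.out.println(tx.store);  // the ancestors /top and /top/top-level-list were merged in
        }
    }

The real code performs the same walk over YangInstanceIdentifier path arguments and calls getDelegate().exists(...).checkedGet() and getDelegate().merge(...) as shown in the hunk above.
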
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/ForwardedBackwardsCompatibleDataBrokerTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/ForwardedBackwardsCompatibleDataBrokerTest.java
new file mode 100644 (file)
index 0000000..f91e356
--- /dev/null
@@ -0,0 +1,72 @@
+package org.opendaylight.controller.md.sal.binding.impl.test;
+
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.binding.impl.ForwardedBackwardsCompatibleDataBroker;
+import org.opendaylight.controller.md.sal.binding.test.AbstractSchemaAwareTest;
+import org.opendaylight.controller.md.sal.binding.test.DataBrokerTestCustomizer;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListBuilder;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelListKey;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+
+import java.util.concurrent.ExecutionException;
+
+import static junit.framework.TestCase.assertNotNull;
+
+public class ForwardedBackwardsCompatibleDataBrokerTest extends
+    AbstractSchemaAwareTest {
+
+    private DataBrokerTestCustomizer testCustomizer;
+    private ForwardedBackwardsCompatibleDataBroker dataBroker;
+    private DOMDataBroker domBroker;
+
+    private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);
+    private static final TopLevelListKey TOP_LIST_KEY = new TopLevelListKey("foo");
+    private static final InstanceIdentifier<TopLevelList> NODE_PATH = TOP_PATH.child(TopLevelList.class, TOP_LIST_KEY);
+    private static final TopLevelList NODE = new TopLevelListBuilder().setKey(TOP_LIST_KEY).build();
+
+    protected DataBrokerTestCustomizer createDataBrokerTestCustomizer() {
+        return new DataBrokerTestCustomizer();
+    }
+
+    @Override
+    protected void setupWithSchema(final SchemaContext context) {
+        testCustomizer = createDataBrokerTestCustomizer();
+
+        domBroker = testCustomizer.createDOMDataBroker();
+        dataBroker = testCustomizer.createBackwardsCompatibleDataBroker();
+        testCustomizer.updateSchema(context);
+    }
+
+
+    /**
+     * The purpose of this test is to exercise the backwards compatible broker
+     * <p>
+     * This test tries to execute the code which ensures that the parents
+     * for a given node get automatically created.
+     *
+     * @see org.opendaylight.controller.md.sal.binding.impl.AbstractReadWriteTransaction#ensureParentsByMerge(org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType, org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier, org.opendaylight.yangtools.yang.binding.InstanceIdentifier)
+     */
+    @Test
+    public void test() throws InterruptedException, ExecutionException {
+        DataModificationTransaction writeTx =
+            dataBroker.beginTransaction();
+
+        writeTx.putOperationalData(NODE_PATH, NODE);
+
+        writeTx.commit();
+
+        // TOP_PATH should exist as it is the parent of NODE_PATH
+        DataObject object = dataBroker.readOperationalData(TOP_PATH);
+
+        assertNotNull(object);
+
+    }
+
+
+}
index e0f6f3546f528a2e72621447b8110dc9e377be71..60eec55ca55df3df580318e5608ea08f1a319801 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.md.sal.binding.test;
 import javassist.ClassPool;
 
 import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.impl.ForwardedBackwardsCompatibleDataBroker;
 import org.opendaylight.controller.md.sal.binding.impl.ForwardedBindingDataBroker;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
@@ -46,13 +47,15 @@ public class DataBrokerTestCustomizer {
     }
 
     public DOMStore createConfigurationDatastore() {
-        InMemoryDOMDataStore store = new InMemoryDOMDataStore("CFG", MoreExecutors.sameThreadExecutor());
+        InMemoryDOMDataStore store = new InMemoryDOMDataStore("CFG",
+                MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
         schemaService.registerSchemaContextListener(store);
         return store;
     }
 
     public DOMStore createOperationalDatastore() {
-        InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
+        InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER",
+                MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
         schemaService.registerSchemaContextListener(store);
         return store;
     }
@@ -69,6 +72,11 @@ public class DataBrokerTestCustomizer {
         return new ForwardedBindingDataBroker(getDOMDataBroker(), getMappingService(), getSchemaService());
     }
 
+    public ForwardedBackwardsCompatibleDataBroker createBackwardsCompatibleDataBroker() {
+        return new ForwardedBackwardsCompatibleDataBroker(getDOMDataBroker(), getMappingService(), getSchemaService(), MoreExecutors.sameThreadExecutor());
+    }
+
+
     private SchemaService getSchemaService() {
         return schemaService;
     }
index deb4a8aecacfbcffe47a2cee869c7d117c68f801..fef5715f50deea8ac6038b8684dc832c779cf582 100644 (file)
@@ -63,6 +63,7 @@ import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.MutableClassToInstanceMap;
 import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
 
 @Beta
 public class BindingTestContext implements AutoCloseable {
@@ -133,8 +134,10 @@ public class BindingTestContext implements AutoCloseable {
 
     public void startNewDomDataBroker() {
         checkState(executor != null, "Executor needs to be set");
-        InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", executor);
-        InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", executor);
+        InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", executor,
+                MoreExecutors.sameThreadExecutor());
+        InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", executor,
+                MoreExecutors.sameThreadExecutor());
         newDatastores = ImmutableMap.<LogicalDatastoreType, DOMStore>builder()
                 .put(LogicalDatastoreType.OPERATIONAL, operStore)
                 .put(LogicalDatastoreType.CONFIGURATION, configStore)
index cf5174319d2128518f70391d0d459ad5af26b2cf..1dd0f3b8278407752b91afbb24548eb857385cbc 100644 (file)
@@ -12,18 +12,19 @@ package org.opendaylight.controller.cluster.datastore.node.utils;
 
 public class PathUtils {
     public static String getParentPath(String currentElementPath){
-        String parentPath = "";
+        StringBuilder parentPath = new StringBuilder();
 
         if(currentElementPath != null){
             String[] parentPaths = currentElementPath.split("/");
             if(parentPaths.length > 2){
                 for(int i=0;i<parentPaths.length-1;i++){
                     if(parentPaths[i].length() > 0){
-                        parentPath += "/" + parentPaths[i];
+                        parentPath.append("/");
+                        parentPath.append(parentPaths[i]);
                     }
                 }
             }
         }
-        return parentPath;
+        return parentPath.toString();
     }
 }
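
A minimal usage sketch of the helper above (illustrative only; the wrapper class PathUtilsExample and the sample paths are invented, and the behaviour is unchanged by this commit, only the string concatenation inside the loop was replaced with a StringBuilder):

    import org.opendaylight.controller.cluster.datastore.node.utils.PathUtils;

    public class PathUtilsExample {
        public static void main(String[] args) {
            // getParentPath() rebuilds every "/"-separated segment except the last one
            System.out.println(PathUtils.getParentPath("/a/b/c"));  // prints "/a/b"
            System.out.println(PathUtils.getParentPath("/a/b"));    // prints "/a"
            System.out.println(PathUtils.getParentPath("/a"));      // prints "" (no parent)
            System.out.println(PathUtils.getParentPath(null));      // prints ""
        }
    }
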
index 33ac9f6ca1c7c8115071d07549999b848fab2d3e..bdd66d3aba25e4766960ed0611362cf813d5360c 100644 (file)
@@ -6579,6 +6579,933 @@ public final class ShardTransactionMessages {
     // @@protoc_insertion_point(class_scope:org.opendaylight.controller.mdsal.MergeDataReply)
   }
 
+  public interface DataExistsOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;
+    /**
+     * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+     */
+    boolean hasInstanceIdentifierPathArguments();
+    /**
+     * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+     */
+    org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier getInstanceIdentifierPathArguments();
+    /**
+     * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+     */
+    org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder getInstanceIdentifierPathArgumentsOrBuilder();
+  }
+  /**
+   * Protobuf type {@code org.opendaylight.controller.mdsal.DataExists}
+   */
+  public static final class DataExists extends
+      com.google.protobuf.GeneratedMessage
+      implements DataExistsOrBuilder {
+    // Use DataExists.newBuilder() to construct.
+    private DataExists(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private DataExists(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final DataExists defaultInstance;
+    public static DataExists getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public DataExists getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private DataExists(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = instanceIdentifierPathArguments_.toBuilder();
+              }
+              instanceIdentifierPathArguments_ = input.readMessage(org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(instanceIdentifierPathArguments_);
+                instanceIdentifierPathArguments_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExists_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExists_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.class, org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<DataExists> PARSER =
+        new com.google.protobuf.AbstractParser<DataExists>() {
+      public DataExists parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new DataExists(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<DataExists> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;
+    public static final int INSTANCEIDENTIFIERPATHARGUMENTS_FIELD_NUMBER = 1;
+    private org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier instanceIdentifierPathArguments_;
+    /**
+     * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+     */
+    public boolean hasInstanceIdentifierPathArguments() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+     */
+    public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier getInstanceIdentifierPathArguments() {
+      return instanceIdentifierPathArguments_;
+    }
+    /**
+     * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+     */
+    public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder getInstanceIdentifierPathArgumentsOrBuilder() {
+      return instanceIdentifierPathArguments_;
+    }
+
+    private void initFields() {
+      instanceIdentifierPathArguments_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasInstanceIdentifierPathArguments()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getInstanceIdentifierPathArguments().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, instanceIdentifierPathArguments_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, instanceIdentifierPathArguments_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.opendaylight.controller.mdsal.DataExists}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExists_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExists_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.class, org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.Builder.class);
+      }
+
+      // Construct using org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getInstanceIdentifierPathArgumentsFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (instanceIdentifierPathArgumentsBuilder_ == null) {
+          instanceIdentifierPathArguments_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance();
+        } else {
+          instanceIdentifierPathArgumentsBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExists_descriptor;
+      }
+
+      public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists getDefaultInstanceForType() {
+        return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.getDefaultInstance();
+      }
+
+      public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists build() {
+        org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists buildPartial() {
+        org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists result = new org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (instanceIdentifierPathArgumentsBuilder_ == null) {
+          result.instanceIdentifierPathArguments_ = instanceIdentifierPathArguments_;
+        } else {
+          result.instanceIdentifierPathArguments_ = instanceIdentifierPathArgumentsBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists) {
+          return mergeFrom((org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists other) {
+        if (other == org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists.getDefaultInstance()) return this;
+        if (other.hasInstanceIdentifierPathArguments()) {
+          mergeInstanceIdentifierPathArguments(other.getInstanceIdentifierPathArguments());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasInstanceIdentifierPathArguments()) {
+          
+          return false;
+        }
+        if (!getInstanceIdentifierPathArguments().isInitialized()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExists) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;
+      private org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier instanceIdentifierPathArguments_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder> instanceIdentifierPathArgumentsBuilder_;
+      /**
+       * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+       */
+      public boolean hasInstanceIdentifierPathArguments() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+       */
+      public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier getInstanceIdentifierPathArguments() {
+        if (instanceIdentifierPathArgumentsBuilder_ == null) {
+          return instanceIdentifierPathArguments_;
+        } else {
+          return instanceIdentifierPathArgumentsBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+       */
+      public Builder setInstanceIdentifierPathArguments(org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier value) {
+        if (instanceIdentifierPathArgumentsBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          instanceIdentifierPathArguments_ = value;
+          onChanged();
+        } else {
+          instanceIdentifierPathArgumentsBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+       */
+      public Builder setInstanceIdentifierPathArguments(
+          org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder builderForValue) {
+        if (instanceIdentifierPathArgumentsBuilder_ == null) {
+          instanceIdentifierPathArguments_ = builderForValue.build();
+          onChanged();
+        } else {
+          instanceIdentifierPathArgumentsBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+       */
+      public Builder mergeInstanceIdentifierPathArguments(org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier value) {
+        if (instanceIdentifierPathArgumentsBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              instanceIdentifierPathArguments_ != org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance()) {
+            instanceIdentifierPathArguments_ =
+              org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.newBuilder(instanceIdentifierPathArguments_).mergeFrom(value).buildPartial();
+          } else {
+            instanceIdentifierPathArguments_ = value;
+          }
+          onChanged();
+        } else {
+          instanceIdentifierPathArgumentsBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+       */
+      public Builder clearInstanceIdentifierPathArguments() {
+        if (instanceIdentifierPathArgumentsBuilder_ == null) {
+          instanceIdentifierPathArguments_ = org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance();
+          onChanged();
+        } else {
+          instanceIdentifierPathArgumentsBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+       */
+      public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder getInstanceIdentifierPathArgumentsBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getInstanceIdentifierPathArgumentsFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+       */
+      public org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder getInstanceIdentifierPathArgumentsOrBuilder() {
+        if (instanceIdentifierPathArgumentsBuilder_ != null) {
+          return instanceIdentifierPathArgumentsBuilder_.getMessageOrBuilder();
+        } else {
+          return instanceIdentifierPathArguments_;
+        }
+      }
+      /**
+       * <code>required .org.opendaylight.controller.mdsal.InstanceIdentifier instanceIdentifierPathArguments = 1;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder> 
+          getInstanceIdentifierPathArgumentsFieldBuilder() {
+        if (instanceIdentifierPathArgumentsBuilder_ == null) {
+          instanceIdentifierPathArgumentsBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifier.Builder, org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages.InstanceIdentifierOrBuilder>(
+                  instanceIdentifierPathArguments_,
+                  getParentForChildren(),
+                  isClean());
+          instanceIdentifierPathArguments_ = null;
+        }
+        return instanceIdentifierPathArgumentsBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.DataExists)
+    }
+
+    static {
+      defaultInstance = new DataExists(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.opendaylight.controller.mdsal.DataExists)
+  }
+
+  public interface DataExistsReplyOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required bool exists = 1;
+    /**
+     * <code>required bool exists = 1;</code>
+     */
+    boolean hasExists();
+    /**
+     * <code>required bool exists = 1;</code>
+     */
+    boolean getExists();
+  }
+  /**
+   * Protobuf type {@code org.opendaylight.controller.mdsal.DataExistsReply}
+   */
+  public static final class DataExistsReply extends
+      com.google.protobuf.GeneratedMessage
+      implements DataExistsReplyOrBuilder {
+    // Use DataExistsReply.newBuilder() to construct.
+    private DataExistsReply(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private DataExistsReply(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final DataExistsReply defaultInstance;
+    public static DataExistsReply getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public DataExistsReply getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private DataExistsReply(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              exists_ = input.readBool();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExistsReply_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExistsReply_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.class, org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<DataExistsReply> PARSER =
+        new com.google.protobuf.AbstractParser<DataExistsReply>() {
+      public DataExistsReply parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new DataExistsReply(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<DataExistsReply> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required bool exists = 1;
+    public static final int EXISTS_FIELD_NUMBER = 1;
+    private boolean exists_;
+    /**
+     * <code>required bool exists = 1;</code>
+     */
+    public boolean hasExists() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required bool exists = 1;</code>
+     */
+    public boolean getExists() {
+      return exists_;
+    }
+
+    private void initFields() {
+      exists_ = false;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasExists()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBool(1, exists_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(1, exists_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code org.opendaylight.controller.mdsal.DataExistsReply}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReplyOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExistsReply_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExistsReply_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.class, org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.Builder.class);
+      }
+
+      // Construct using org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        exists_ = false;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.internal_static_org_opendaylight_controller_mdsal_DataExistsReply_descriptor;
+      }
+
+      public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply getDefaultInstanceForType() {
+        return org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.getDefaultInstance();
+      }
+
+      public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply build() {
+        org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply buildPartial() {
+        org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply result = new org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.exists_ = exists_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply) {
+          return mergeFrom((org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply other) {
+        if (other == org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply.getDefaultInstance()) return this;
+        if (other.hasExists()) {
+          setExists(other.getExists());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasExists()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.DataExistsReply) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required bool exists = 1;
+      private boolean exists_ ;
+      /**
+       * <code>required bool exists = 1;</code>
+       */
+      public boolean hasExists() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required bool exists = 1;</code>
+       */
+      public boolean getExists() {
+        return exists_;
+      }
+      /**
+       * <code>required bool exists = 1;</code>
+       */
+      public Builder setExists(boolean value) {
+        bitField0_ |= 0x00000001;
+        exists_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required bool exists = 1;</code>
+       */
+      public Builder clearExists() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        exists_ = false;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:org.opendaylight.controller.mdsal.DataExistsReply)
+    }
+
+    static {
+      defaultInstance = new DataExistsReply(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:org.opendaylight.controller.mdsal.DataExistsReply)
+  }
+
   private static com.google.protobuf.Descriptors.Descriptor
     internal_static_org_opendaylight_controller_mdsal_CloseTransaction_descriptor;
   private static
@@ -6649,6 +7576,16 @@ public final class ShardTransactionMessages {
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_org_opendaylight_controller_mdsal_MergeDataReply_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_opendaylight_controller_mdsal_DataExists_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_opendaylight_controller_mdsal_DataExists_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_org_opendaylight_controller_mdsal_DataExistsReply_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_org_opendaylight_controller_mdsal_DataExistsReply_fieldAccessorTable;
 
   public static com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -6683,10 +7620,13 @@ public final class ShardTransactionMessages {
       "thArguments\030\001 \002(\01325.org.opendaylight.con" +
       "troller.mdsal.InstanceIdentifier\022?\n\016norm" +
       "alizedNode\030\002 \002(\0132\'.org.opendaylight.cont" +
-      "roller.mdsal.Node\"\020\n\016MergeDataReplyBV\n:o" +
-      "rg.opendaylight.controller.protobuff.mes" +
-      "sages.transactionB\030ShardTransactionMessa" +
-      "ges"
+      "roller.mdsal.Node\"\020\n\016MergeDataReply\"l\n\nD" +
+      "ataExists\022^\n\037instanceIdentifierPathArgum" +
+      "ents\030\001 \002(\01325.org.opendaylight.controller" +
+      ".mdsal.InstanceIdentifier\"!\n\017DataExistsR" +
+      "eply\022\016\n\006exists\030\001 \002(\010BV\n:org.opendaylight",
+      ".controller.protobuff.messages.transacti" +
+      "onB\030ShardTransactionMessages"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -6777,6 +7717,18 @@ public final class ShardTransactionMessages {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_org_opendaylight_controller_mdsal_MergeDataReply_descriptor,
               new java.lang.String[] { });
+          internal_static_org_opendaylight_controller_mdsal_DataExists_descriptor =
+            getDescriptor().getMessageTypes().get(14);
+          internal_static_org_opendaylight_controller_mdsal_DataExists_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_opendaylight_controller_mdsal_DataExists_descriptor,
+              new java.lang.String[] { "InstanceIdentifierPathArguments", });
+          internal_static_org_opendaylight_controller_mdsal_DataExistsReply_descriptor =
+            getDescriptor().getMessageTypes().get(15);
+          internal_static_org_opendaylight_controller_mdsal_DataExistsReply_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_org_opendaylight_controller_mdsal_DataExistsReply_descriptor,
+              new java.lang.String[] { "Exists", });
           return null;
         }
       };
index 4177bd7a052c244385e75df38dcf0fa31523b67c..63b75ac430f9bbbaca2ad8d1ecb7c63f0cff37d2 100644 (file)
@@ -65,3 +65,11 @@ required Node normalizedNode =2;
 message MergeDataReply{
 
 }
+
+message DataExists {
+  required InstanceIdentifier instanceIdentifierPathArguments = 1;
+}
+
+message DataExistsReply {
+  required bool exists = 1;
+}
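
The two message types added above compile into the DataExists / DataExistsReply classes shown earlier in this change. Below is a minimal sketch (not part of the patch) of how a caller might build them with the generated API; the example class name and the placeholder InstanceIdentifier are assumptions for illustration only.

    import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
    import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;

    public class DataExistsExample {
        public static void main(String[] args) {
            // DataExistsReply carries a single required bool.
            ShardTransactionMessages.DataExistsReply reply =
                ShardTransactionMessages.DataExistsReply.newBuilder()
                    .setExists(true)
                    .build();
            System.out.println("exists = " + (reply.hasExists() && reply.getExists()));

            // DataExists carries the path to check. getDefaultInstance() is only a
            // placeholder; a real caller would populate the InstanceIdentifier from an
            // actual YangInstanceIdentifier. buildPartial() is used so the sketch does
            // not fail if the placeholder leaves required sub-fields unset.
            ShardTransactionMessages.DataExists request =
                ShardTransactionMessages.DataExists.newBuilder()
                    .setInstanceIdentifierPathArguments(
                        NormalizedNodeMessages.InstanceIdentifier.getDefaultInstance())
                    .buildPartial();
            System.out.println("has path = " + request.hasInstanceIdentifierPathArguments());
        }
    }
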
index 1b85d46fc6ebdfa9b13de079db728b9dadf2beac..bdad86ddc1e8fb5521607fbb91a575a99e677d38 100644 (file)
@@ -78,8 +78,12 @@ public class NormalizedNodeToNodeCodecTest {
 
     NormalizedNodeToNodeCodec codec =
         new NormalizedNodeToNodeCodec(schemaContext);
+    long start = System.currentTimeMillis();
     Container container =
         codec.encode(instanceIdentifierFromString(id), output);
+    long end = System.currentTimeMillis();
+
+    System.out.println("Timetaken to encode :"+(end-start));
 
     assertNotNull(container);
     assertEquals(id, container.getParentPath() + "/"
@@ -89,8 +93,12 @@ public class NormalizedNodeToNodeCodecTest {
     // first get the node representation of normalized node
     final Node node = container.getNormalizedNode();
 
+    start = System.currentTimeMillis();
     NormalizedNode<?, ?> normalizedNode =
         codec.decode(instanceIdentifierFromString(id), node);
+    end = System.currentTimeMillis();
+
+    System.out.println("Timetaken to decode :"+(end-start));
 
     assertEquals(normalizedNode.getValue().toString(), output.getValue()
         .toString());
index 7891ee2088d4c3a183682e892f51588c2a14a9f5..72da6304e54f2c19499189fa13ca0e74e30172db 100644 (file)
                 <module>
                     <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:distributed-datastore-provider">prefix:distributed-operational-datastore-provider</type>
                     <name>distributed-operational-store-module</name>
-                    <schema-service>
+                    <operational-schema-service>
                         <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
                         <name>yang-schema-service</name>
-                    </schema-service>
+                    </operational-schema-service>
                 </module>
 
                 <module>
                     <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:config:distributed-datastore-provider">prefix:distributed-config-datastore-provider</type>
                     <name>distributed-config-store-module</name>
-                    <schema-service>
+                    <config-schema-service>
                         <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:schema-service</type>
                         <name>yang-schema-service</name>
-                    </schema-service>
+                    </config-schema-service>
                 </module>
 
                 <module>
index 9749ae27ae671559c4fb5b08c85620ece27188a3..5bf231dbe1ca31040565bad6b82d90201a4c0df2 100644 (file)
@@ -18,9 +18,9 @@ odl-cluster-data {
       netty.tcp {
         hostname = "<CHANGE_ME>"
         port = 2550
-           maximum-frame-size = 2097152
-           send-buffer-size = 52428800
-           receive-buffer-size = 52428800
+        maximum-frame-size = 419430400
+        send-buffer-size = 52428800
+        receive-buffer-size = 52428800
       }
     }
 
index 848d425bf9d8a712d42a873b564a1657b9d95953..648e8d23d06892f8964ed65e322b7cb988ee8194 100644 (file)
       <artifactId>akka-testkit_${scala.version}</artifactId>
     </dependency>
 
+    <dependency>
+      <groupId>com.typesafe.akka</groupId>
+      <artifactId>akka-slf4j_${scala.version}</artifactId>
+    </dependency>
+
     <!-- SAL Dependencies -->
 
     <dependency>
index ce0516064ea7adc355df09f45986449553b3460e..ac01f42a7fb017a9e33b330c64e6b5178aa95ff2 100644 (file)
@@ -33,4 +33,12 @@ public abstract class AbstractUntypedActor extends UntypedActor {
     }
 
     protected abstract void handleReceive(Object message) throws Exception;
+
+    protected void ignoreMessage(Object message){
+        LOG.debug("Unhandled message {} ", message);
+    }
+
+    protected void unknownMessage(Object message) throws Exception{
+        unhandled(message);
+    }
 }
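
The ignoreMessage() and unknownMessage() helpers added above give subclasses a uniform way to either log-and-drop a message or hand it to Akka's unhandled() bookkeeping. A hypothetical subclass sketch follows (class name and message check are assumptions, not part of the patch; the import of AbstractUntypedActor is as in the file above):

    public class ExampleActor extends AbstractUntypedActor {
        @Override
        protected void handleReceive(Object message) throws Exception {
            if (message instanceof String) {
                // deliberately dropped messages are logged at debug level
                ignoreMessage(message);
            } else {
                // everything else goes through Akka's standard unhandled() path
                unknownMessage(message);
            }
        }
    }
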
index 142aacde65c32f1050846810c8d33b1c9122be55..8910137ec4583272b55ead555b0ed12e11caa02c 100644 (file)
@@ -12,18 +12,31 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
 import akka.cluster.Cluster;
 import akka.cluster.ClusterEvent;
+import com.google.common.base.Preconditions;
 
 public class ClusterWrapperImpl implements ClusterWrapper {
     private final Cluster cluster;
     private final String currentMemberName;
 
     public ClusterWrapperImpl(ActorSystem actorSystem){
+        Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
+
         cluster = Cluster.get(actorSystem);
+
+        Preconditions.checkState(cluster.getSelfRoles().size() > 0,
+            "No akka roles were specified\n" +
+                "One way to specify the member name is to pass a property on the command line like so\n" +
+                "   -Dakka.cluster.roles.0=member-3\n" +
+                "member-3 here would be the name of the member"
+        );
+
         currentMemberName = (String) cluster.getSelfRoles().toArray()[0];
 
     }
 
     public void subscribeToMemberEvents(ActorRef actorRef){
+        Preconditions.checkNotNull(actorRef, "actorRef should not be null");
+
         cluster.subscribe(actorRef, ClusterEvent.initialStateAsEvents(),
             ClusterEvent.MemberEvent.class,
             ClusterEvent.UnreachableMember.class);
index abc69f18975aeb671713e955e11498f1c7d55050..d0abb20718bd87dfc13f862bbe6f4672878abc35 100644 (file)
@@ -29,7 +29,7 @@ public class CompositeModificationPayload extends Payload implements
         modification = null;
     }
     public CompositeModificationPayload(Object modification){
-        this.modification = (PersistentMessages.CompositeModification) modification;
+        this.modification = (PersistentMessages.CompositeModification) Preconditions.checkNotNull(modification, "modification should not be null");
     }
 
     @Override public Map<GeneratedMessage.GeneratedExtension, PersistentMessages.CompositeModification> encode() {
index 34590025d59ab78d3e0095e81addaa8ed874935f..37b565d2131debe797ca2ab975baf82550740a68 100644 (file)
@@ -9,6 +9,7 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
 import com.typesafe.config.Config;
 import com.typesafe.config.ConfigFactory;
 import com.typesafe.config.ConfigObject;
@@ -34,11 +35,23 @@ public class ConfigurationImpl implements Configuration {
     private static final Logger
         LOG = LoggerFactory.getLogger(DistributedDataStore.class);
 
+    // Look up maps to speed things up
+
+    // key = memberName, value = list of shardNames
+    private Map<String, List<String>> memberShardNames = new HashMap<>();
+
+    // key = shardName, value = list of replicaNames (replicaNames are the same as memberNames)
+    private Map<String, List<String>> shardReplicaNames = new HashMap<>();
+
 
     public ConfigurationImpl(String moduleShardsConfigPath,
 
         String modulesConfigPath){
 
+        Preconditions.checkNotNull(moduleShardsConfigPath, "moduleShardsConfigPath should not be null");
+        Preconditions.checkNotNull(modulesConfigPath, "modulesConfigPath should not be null");
+
+
         File moduleShardsFile = new File("./configuration/initial/" + moduleShardsConfigPath);
         File modulesFile = new File("./configuration/initial/" + modulesConfigPath);
 
@@ -66,6 +79,13 @@ public class ConfigurationImpl implements Configuration {
     }
 
     @Override public List<String> getMemberShardNames(String memberName){
+
+        Preconditions.checkNotNull(memberName, "memberName should not be null");
+
+        if(memberShardNames.containsKey(memberName)){
+            return memberShardNames.get(memberName);
+        }
+
         List<String> shards = new ArrayList();
         for(ModuleShard ms : moduleShards){
             for(Shard s : ms.getShards()){
@@ -76,11 +96,17 @@ public class ConfigurationImpl implements Configuration {
                 }
             }
         }
+
+        memberShardNames.put(memberName, shards);
+
         return shards;
 
     }
 
     @Override public Optional<String> getModuleNameFromNameSpace(String nameSpace) {
+
+        Preconditions.checkNotNull(nameSpace, "nameSpace should not be null");
+
         for(Module m : modules){
             if(m.getNameSpace().equals(nameSpace)){
                 return Optional.of(m.getName());
@@ -98,6 +124,9 @@ public class ConfigurationImpl implements Configuration {
     }
 
     @Override public List<String> getShardNamesFromModuleName(String moduleName) {
+
+        Preconditions.checkNotNull(moduleName, "moduleName should not be null");
+
         for(ModuleShard m : moduleShards){
             if(m.getModuleName().equals(moduleName)){
                 List<String> l = new ArrayList<>();
@@ -112,14 +141,23 @@ public class ConfigurationImpl implements Configuration {
     }
 
     @Override public List<String> getMembersFromShardName(String shardName) {
-        List<String> shards = new ArrayList();
+
+        Preconditions.checkNotNull(shardName, "shardName should not be null");
+
+        if(shardReplicaNames.containsKey(shardName)){
+            return shardReplicaNames.get(shardName);
+        }
+
         for(ModuleShard ms : moduleShards){
             for(Shard s : ms.getShards()) {
                 if(s.getName().equals(shardName)){
-                    return s.getReplicas();
+                    List<String> replicas = s.getReplicas();
+                    shardReplicaNames.put(shardName, replicas);
+                    return replicas;
                 }
             }
         }
+        shardReplicaNames.put(shardName, Collections.EMPTY_LIST);
         return Collections.EMPTY_LIST;
     }
 
index cdf04dd093e95676b6ce9d635125abc8aa2b602e..1dab285679474378b47c1c0a1c488ebf89d66fea 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.Props;
 import akka.japi.Creator;
+import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.datastore.messages.DataChanged;
 import org.opendaylight.controller.cluster.datastore.messages.DataChangedReply;
 import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
@@ -27,9 +28,10 @@ public class DataChangeListener extends AbstractUntypedActor {
 
     public DataChangeListener(SchemaContext schemaContext,
                               AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener, YangInstanceIdentifier pathId) {
-        this.listener = listener;
-        this.schemaContext = schemaContext;
-        this.pathId  = pathId;
+
+        this.schemaContext = Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
+        this.listener = Preconditions.checkNotNull(listener, "listener should not be null");
+        this.pathId  = Preconditions.checkNotNull(pathId, "pathId should not be null");
     }
 
     @Override public void handleReceive(Object message) throws Exception {
@@ -44,7 +46,7 @@ public class DataChangeListener extends AbstractUntypedActor {
         notificationsEnabled = message.isEnabled();
     }
 
-    public void dataChanged(Object message) {
+    private void dataChanged(Object message) {
 
         // Do nothing if notifications are not enabled
         if(!notificationsEnabled){
index a4ca4562685d4f4b3dc717588d0e35ede957d466..6d835498afa2af24ec915e3b010e67710f685d51 100644 (file)
@@ -9,6 +9,7 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorSelection;
+import com.google.common.base.Preconditions;
 import org.opendaylight.controller.cluster.datastore.messages.DataChanged;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
@@ -24,7 +25,7 @@ public class DataChangeListenerProxy implements AsyncDataChangeListener<YangInst
     private final SchemaContext schemaContext;
 
     public DataChangeListenerProxy(SchemaContext schemaContext,ActorSelection dataChangeListenerActor) {
-        this.dataChangeListenerActor = dataChangeListenerActor;
+        this.dataChangeListenerActor = Preconditions.checkNotNull(dataChangeListenerActor, "dataChangeListenerActor should not be null");
         this.schemaContext = schemaContext;
     }
 
index 479af79748033342041f32fe5221e68f78bf1c2f..40e045f18e31bdfbcf2eb34c9fa0847a778f94f4 100644 (file)
@@ -10,8 +10,10 @@ package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
 import akka.actor.ActorSystem;
+import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
@@ -25,6 +27,8 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransactio
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.util.PropertyUtils;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
@@ -32,8 +36,6 @@ import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.concurrent.Executors;
-
 /**
  *
  */
@@ -42,35 +44,52 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener, Au
     private static final Logger
         LOG = LoggerFactory.getLogger(DistributedDataStore.class);
 
-    private static final int DEFAULT_EXECUTOR_POOL_SIZE = 10;
+    private static final String EXECUTOR_MAX_POOL_SIZE_PROP =
+            "mdsal.dist-datastore-executor-pool.size";
+    private static final int DEFAULT_EXECUTOR_MAX_POOL_SIZE = 10;
+
+    private static final String EXECUTOR_MAX_QUEUE_SIZE_PROP =
+            "mdsal.dist-datastore-executor-queue.size";
+    private static final int DEFAULT_EXECUTOR_MAX_QUEUE_SIZE = 5000;
 
-    private final String type;
     private final ActorContext actorContext;
 
     private SchemaContext schemaContext;
 
-
-
     /**
      * Executor used to run FutureTask's
      *
      * This is typically used when we need to make a request to an actor and
      * wait for it's response and the consumer needs to be provided a Future.
-     *
-     * FIXME : Make the thread pool size configurable.
      */
     private final ListeningExecutorService executor =
-        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(DEFAULT_EXECUTOR_POOL_SIZE));
+            MoreExecutors.listeningDecorator(
+                    SpecialExecutors.newBlockingBoundedFastThreadPool(
+                            PropertyUtils.getIntSystemProperty(
+                                    EXECUTOR_MAX_POOL_SIZE_PROP,
+                                    DEFAULT_EXECUTOR_MAX_POOL_SIZE),
+                            PropertyUtils.getIntSystemProperty(
+                                    EXECUTOR_MAX_QUEUE_SIZE_PROP,
+                                    DEFAULT_EXECUTOR_MAX_QUEUE_SIZE), "DistDataStore"));
 
     public DistributedDataStore(ActorSystem actorSystem, String type, ClusterWrapper cluster, Configuration configuration) {
-        this(new ActorContext(actorSystem, actorSystem
+        Preconditions.checkNotNull(actorSystem, "actorSystem should not be null");
+        Preconditions.checkNotNull(type, "type should not be null");
+        Preconditions.checkNotNull(cluster, "cluster should not be null");
+        Preconditions.checkNotNull(configuration, "configuration should not be null");
+
+
+        String shardManagerId = ShardManagerIdentifier.builder().type(type).build().toString();
+
+        LOG.info("Creating ShardManager : {}", shardManagerId);
+
+        this.actorContext = new ActorContext(actorSystem, actorSystem
             .actorOf(ShardManager.props(type, cluster, configuration),
-                "shardmanager-" + type), cluster, configuration), type);
+                shardManagerId ), cluster, configuration);
     }
 
-    public DistributedDataStore(ActorContext actorContext, String type) {
-        this.type = type;
-        this.actorContext = actorContext;
+    public DistributedDataStore(ActorContext actorContext) {
+        this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
     }
 
 
@@ -79,6 +98,12 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener, Au
         YangInstanceIdentifier path, L listener,
         AsyncDataBroker.DataChangeScope scope) {
 
+        Preconditions.checkNotNull(path, "path should not be null");
+        Preconditions.checkNotNull(listener, "listener should not be null");
+
+
+        LOG.debug("Registering listener: {} for path: {} scope: {}", listener, path, scope);
+
         ActorRef dataChangeListenerActor = actorContext.getActorSystem().actorOf(
             DataChangeListener.props(schemaContext,listener,path ));
 
index 10dbbc84d873ee54b5421643c00b046043e58114..63b26331a510835ae623a9feade7893938d81f84 100644 (file)
@@ -16,9 +16,10 @@ import akka.event.LoggingAdapter;
 import akka.japi.Creator;
 import akka.serialization.Serialization;
 import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardTransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardMBeanFactory;
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard.ShardStats;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
@@ -37,8 +38,10 @@ import org.opendaylight.controller.cluster.datastore.modification.MutableComposi
 import org.opendaylight.controller.cluster.raft.ConfigParams;
 import org.opendaylight.controller.cluster.raft.DefaultConfigParamsImpl;
 import org.opendaylight.controller.cluster.raft.RaftActor;
+import org.opendaylight.controller.cluster.raft.ReplicatedLogEntry;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
 import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
@@ -48,11 +51,11 @@ import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import scala.concurrent.duration.FiniteDuration;
 
 import java.util.ArrayList;
+import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 
 /**
@@ -67,9 +70,7 @@ public class Shard extends RaftActor {
 
     public static final String DEFAULT_NAME = "default";
 
-    private final ListeningExecutorService storeExecutor =
-        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
-
+    // The state of this Shard
     private final InMemoryDOMDataStore store;
 
     private final Map<Object, DOMStoreThreePhaseCommitCohort>
@@ -79,10 +80,11 @@ public class Shard extends RaftActor {
         Logging.getLogger(getContext().system(), this);
 
     // By default persistent will be true and can be turned off using the system
-    // property persistent
+    // property shard.persistent
     private final boolean persistent;
 
-    private final String name;
+    // The name of this shard
+    private final ShardIdentifier name;
 
     private volatile SchemaContext schemaContext;
 
@@ -90,8 +92,8 @@ public class Shard extends RaftActor {
 
     private final List<ActorSelection> dataChangeListeners = new ArrayList<>();
 
-    private Shard(String name, Map<String, String> peerAddresses) {
-        super(name, peerAddresses, Optional.of(configParams));
+    private Shard(ShardIdentifier name, Map<ShardIdentifier, String> peerAddresses) {
+        super(name.toString(), mapPeerAddresses(peerAddresses), Optional.of(configParams));
 
         this.name = name;
 
@@ -99,15 +101,32 @@ public class Shard extends RaftActor {
 
         this.persistent = !"false".equals(setting);
 
-        LOG.info("Creating shard : {} persistent : {}", name, persistent);
+        LOG.info("Shard created : {} persistent : {}", name, persistent);
 
-        store = new InMemoryDOMDataStore(name, storeExecutor);
+        store = InMemoryDOMDataStoreFactory.create(name.toString(), null);
 
-        shardMBean = ShardMBeanFactory.getShardStatsMBean(name);
+        shardMBean = ShardMBeanFactory.getShardStatsMBean(name.toString());
 
     }
 
-    public static Props props(final String name, final Map<String, String> peerAddresses) {
+    private static Map<String, String> mapPeerAddresses(Map<ShardIdentifier, String> peerAddresses){
+        Map<String , String> map = new HashMap<>();
+
+        for(Map.Entry<ShardIdentifier, String> entry : peerAddresses.entrySet()){
+            map.put(entry.getKey().toString(), entry.getValue());
+        }
+
+        return map;
+    }
+
+
+
+
+    public static Props props(final ShardIdentifier name,
+        final Map<ShardIdentifier, String> peerAddresses) {
+        Preconditions.checkNotNull(name, "name should not be null");
+        Preconditions.checkNotNull(peerAddresses, "peerAddresses should not be null");
+
         return Props.create(new Creator<Shard>() {
 
             @Override
@@ -119,13 +138,15 @@ public class Shard extends RaftActor {
     }
 
 
-    @Override public void onReceiveCommand(Object message){
-        LOG.debug("Received message {} from {}", message.getClass().toString(), getSender());
+    @Override public void onReceiveCommand(Object message) {
+        LOG.debug("Received message {} from {}", message.getClass().toString(),
+            getSender());
 
-        if (message.getClass().equals(CreateTransactionChain.SERIALIZABLE_CLASS)) {
-            if(isLeader()) {
+        if (message.getClass()
+            .equals(CreateTransactionChain.SERIALIZABLE_CLASS)) {
+            if (isLeader()) {
                 createTransactionChain();
-            } else if(getLeader() != null){
+            } else if (getLeader() != null) {
                 getLeader().forward(message, getContext());
             }
         } else if (message instanceof RegisterChangeListener) {
@@ -134,57 +155,84 @@ public class Shard extends RaftActor {
             updateSchemaContext((UpdateSchemaContext) message);
         } else if (message instanceof ForwardedCommitTransaction) {
             handleForwardedCommit((ForwardedCommitTransaction) message);
-        } else if (message.getClass().equals(CreateTransaction.SERIALIZABLE_CLASS)) {
-            if(isLeader()) {
+        } else if (message.getClass()
+            .equals(CreateTransaction.SERIALIZABLE_CLASS)) {
+            if (isLeader()) {
                 createTransaction(CreateTransaction.fromSerializable(message));
-            } else if(getLeader() != null){
+            } else if (getLeader() != null) {
                 getLeader().forward(message, getContext());
             }
-        } else if (message instanceof PeerAddressResolved){
+        } else if (message instanceof PeerAddressResolved) {
             PeerAddressResolved resolved = (PeerAddressResolved) message;
-            setPeerAddress(resolved.getPeerId(), resolved.getPeerAddress());
+            setPeerAddress(resolved.getPeerId().toString(), resolved.getPeerAddress());
         } else {
-          super.onReceiveCommand(message);
+            super.onReceiveCommand(message);
         }
     }
 
-   private ActorRef createTypedTransactionActor(CreateTransaction createTransaction,String transactionId){
-      if(createTransaction.getTransactionType()== TransactionProxy.TransactionType.READ_ONLY.ordinal()){
-        return getContext().actorOf(
-            ShardTransaction.props( store.newReadOnlyTransaction(), getSelf(), schemaContext), transactionId);
+    private ActorRef createTypedTransactionActor(
+        CreateTransaction createTransaction, ShardTransactionIdentifier transactionId) {
+        if (createTransaction.getTransactionType()
+            == TransactionProxy.TransactionType.READ_ONLY.ordinal()) {
+
+            shardMBean.incrementReadOnlyTransactionCount();
+
+            return getContext().actorOf(
+                ShardTransaction
+                    .props(store.newReadOnlyTransaction(), getSelf(),
+                        schemaContext), transactionId.toString());
+
+        } else if (createTransaction.getTransactionType()
+            == TransactionProxy.TransactionType.READ_WRITE.ordinal()) {
 
-      }else if (createTransaction.getTransactionType()== TransactionProxy.TransactionType.READ_WRITE.ordinal()){
-        return getContext().actorOf(
-            ShardTransaction.props( store.newReadWriteTransaction(), getSelf(), schemaContext), transactionId);
+            shardMBean.incrementReadWriteTransactionCount();
 
+            return getContext().actorOf(
+                ShardTransaction
+                    .props(store.newReadWriteTransaction(), getSelf(),
+                        schemaContext), transactionId.toString());
 
-      }else if (createTransaction.getTransactionType()== TransactionProxy.TransactionType.WRITE_ONLY.ordinal()){
-        return getContext().actorOf(
-            ShardTransaction.props( store.newWriteOnlyTransaction(), getSelf(), schemaContext), transactionId);
-      }else{
-        throw new IllegalArgumentException ("CreateTransaction message has unidentified transaction type="+createTransaction.getTransactionType()) ;
-      }
-   }
+
+        } else if (createTransaction.getTransactionType()
+            == TransactionProxy.TransactionType.WRITE_ONLY.ordinal()) {
+
+            shardMBean.incrementWriteOnlyTransactionCount();
+
+            return getContext().actorOf(
+                ShardTransaction
+                    .props(store.newWriteOnlyTransaction(), getSelf(),
+                        schemaContext), transactionId.toString());
+        } else {
+            // FIXME: This does not seem right
+            throw new IllegalArgumentException(
+                "CreateTransaction message has unidentified transaction type="
+                    + createTransaction.getTransactionType());
+        }
+    }
 
     private void createTransaction(CreateTransaction createTransaction) {
 
-        String transactionId = "shard-" + createTransaction.getTransactionId();
-        LOG.info("Creating transaction : {} " , transactionId);
-        ActorRef transactionActor = createTypedTransactionActor(createTransaction,transactionId);
+        ShardTransactionIdentifier transactionId = ShardTransactionIdentifier.builder().remoteTransactionId(createTransaction.getTransactionId()).build();
+        LOG.debug("Creating transaction : {} ", transactionId);
+        ActorRef transactionActor =
+            createTypedTransactionActor(createTransaction, transactionId);
 
         getSender()
-            .tell(new CreateTransactionReply(Serialization.serializedActorPath(transactionActor), createTransaction.getTransactionId()).toSerializable(),
+            .tell(new CreateTransactionReply(
+                Serialization.serializedActorPath(transactionActor),
+                createTransaction.getTransactionId()).toSerializable(),
                 getSelf());
     }
 
     private void commit(final ActorRef sender, Object serialized) {
-        Modification modification = MutableCompositeModification.fromSerializable(serialized, schemaContext);
+        Modification modification = MutableCompositeModification
+            .fromSerializable(serialized, schemaContext);
         DOMStoreThreePhaseCommitCohort cohort =
             modificationToCohort.remove(serialized);
         if (cohort == null) {
-            LOG.error(
-                "Could not find cohort for modification : {}", modification);
-            LOG.info("Writing modification using a new transaction");
+            LOG.debug(
+                "Could not find cohort for modification : {}. Writing modification using a new transaction",
+                modification);
             DOMStoreReadWriteTransaction transaction =
                 store.newReadWriteTransaction();
             modification.apply(transaction);
@@ -195,45 +243,46 @@ public class Shard extends RaftActor {
                 future.get();
                 future = commitCohort.commit();
                 future.get();
-            } catch (InterruptedException e) {
-                LOG.error("Failed to commit", e);
-            } catch (ExecutionException e) {
+            } catch (InterruptedException | ExecutionException e) {
+                shardMBean.incrementFailedTransactionsCount();
                 LOG.error("Failed to commit", e);
+                return;
             }
+            //we want to just apply the recovery commit and return
+            shardMBean.incrementCommittedTransactionCount();
+            return;
         }
 
         final ListenableFuture<Void> future = cohort.commit();
-        shardMBean.incrementCommittedTransactionCount();
         final ActorRef self = getSelf();
         future.addListener(new Runnable() {
             @Override
             public void run() {
                 try {
                     future.get();
-
-                    if(sender != null) {
                         sender
                             .tell(new CommitTransactionReply().toSerializable(),
                                 self);
-                    } else {
-                        LOG.error("sender is null ???");
-                    }
+                        shardMBean.incrementCommittedTransactionCount();
+                        shardMBean.setLastCommittedTransactionTime(new Date());
                 } catch (InterruptedException | ExecutionException e) {
-                    // FIXME : Handle this properly
-                    LOG.error(e, "An exception happened when committing");
+                    shardMBean.incrementFailedTransactionsCount();
+                    sender.tell(new akka.actor.Status.Failure(e),self);
                 }
             }
         }, getContext().dispatcher());
     }
 
     private void handleForwardedCommit(ForwardedCommitTransaction message) {
-        Object serializedModification = message.getModification().toSerializable();
+        Object serializedModification =
+            message.getModification().toSerializable();
 
         modificationToCohort
-            .put(serializedModification , message.getCohort());
+            .put(serializedModification, message.getCohort());
 
-        if(persistent) {
-            this.persistData(getSender(), "identifier", new CompositeModificationPayload(serializedModification));
+        if (persistent) {
+            this.persistData(getSender(), "identifier",
+                new CompositeModificationPayload(serializedModification));
         } else {
             this.commit(getSender(), serializedModification);
         }
@@ -247,7 +296,8 @@ public class Shard extends RaftActor {
     private void registerChangeListener(
         RegisterChangeListener registerChangeListener) {
 
-        LOG.debug("registerDataChangeListener for " + registerChangeListener.getPath());
+        LOG.debug("registerDataChangeListener for {}", registerChangeListener
+            .getPath());
 
 
         ActorSelection dataChangeListenerPath = getContext()
@@ -258,14 +308,16 @@ public class Shard extends RaftActor {
         // Notify the listener if notifications should be enabled or not
         // If this shard is the leader then it will enable notifications else
         // it will not
-        dataChangeListenerPath.tell(new EnableNotification(isLeader()), getSelf());
+        dataChangeListenerPath
+            .tell(new EnableNotification(isLeader()), getSelf());
 
         // Now store a reference to the data change listener so it can be notified
         // at a later point if notifications should be enabled or disabled
         dataChangeListeners.add(dataChangeListenerPath);
 
         AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>
-            listener = new DataChangeListenerProxy(schemaContext,dataChangeListenerPath);
+            listener =
+            new DataChangeListenerProxy(schemaContext, dataChangeListenerPath);
 
         org.opendaylight.yangtools.concepts.ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>
             registration =
@@ -275,7 +327,9 @@ public class Shard extends RaftActor {
             getContext().actorOf(
                 DataChangeListenerRegistration.props(registration));
 
-        LOG.debug("registerDataChangeListener sending reply, listenerRegistrationPath = " + listenerRegistration.path().toString());
+        LOG.debug(
+            "registerDataChangeListener sending reply, listenerRegistrationPath = {} "
+                , listenerRegistration.path().toString());
 
         getSender()
             .tell(new RegisterChangeListenerReply(listenerRegistration.path()),
@@ -289,21 +343,23 @@ public class Shard extends RaftActor {
                 ShardTransactionChain.props(chain, schemaContext));
         getSender()
             .tell(new CreateTransactionChainReply(transactionChain.path())
-                .toSerializable(),
+                    .toSerializable(),
                 getSelf());
     }
 
     @Override protected void applyState(ActorRef clientActor, String identifier,
         Object data) {
 
-        if(data instanceof CompositeModificationPayload){
+        if (data instanceof CompositeModificationPayload) {
             Object modification =
                 ((CompositeModificationPayload) data).getModification();
 
-            if(modification != null){
+            if (modification != null) {
                 commit(clientActor, modification);
             } else {
-                LOG.error("modification is null - this is very unexpected");
+                LOG.error(
+                    "modification is null - this is very unexpected, clientActor = {}, identifier = {}",
+                    clientActor.path().toString(), identifier);
             }
 
 
@@ -311,6 +367,17 @@ public class Shard extends RaftActor {
             LOG.error("Unknown state received {}", data);
         }
 
+        // Update stats
+        ReplicatedLogEntry lastLogEntry = getLastLogEntry();
+
+        if (lastLogEntry != null) {
+            shardMBean.setLastLogIndex(lastLogEntry.getIndex());
+            shardMBean.setLastLogTerm(lastLogEntry.getTerm());
+        }
+
+        shardMBean.setCommitIndex(getCommitIndex());
+        shardMBean.setLastApplied(getLastApplied());
+
     }
 
     @Override protected Object createSnapshot() {
@@ -322,19 +389,21 @@ public class Shard extends RaftActor {
     }
 
     @Override protected void onStateChanged() {
-        for(ActorSelection dataChangeListener : dataChangeListeners){
-            dataChangeListener.tell(new EnableNotification(isLeader()), getSelf());
+        for (ActorSelection dataChangeListener : dataChangeListeners) {
+            dataChangeListener
+                .tell(new EnableNotification(isLeader()), getSelf());
         }
 
-        if(getLeaderId() != null){
+        if (getLeaderId() != null) {
             shardMBean.setLeader(getLeaderId());
         }
 
         shardMBean.setRaftState(getRaftState().name());
+        shardMBean.setCurrentTerm(getCurrentTerm());
     }
 
     @Override public String persistenceId() {
-        return this.name;
+        return this.name.toString();
     }
 
 
index 64c6821120f94f99a389c12700757a7b8c7266f5..6162a0327ca6ab229be75b4f8b8c2c994dcb253a 100644 (file)
@@ -18,6 +18,10 @@ import akka.cluster.ClusterEvent;
 import akka.japi.Creator;
 import akka.japi.Function;
 import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfo;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager.ShardManagerInfoMBean;
 import org.opendaylight.controller.cluster.datastore.messages.FindLocalShard;
 import org.opendaylight.controller.cluster.datastore.messages.FindPrimary;
 import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
@@ -28,6 +32,7 @@ import org.opendaylight.controller.cluster.datastore.messages.PrimaryNotFound;
 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
 import scala.concurrent.duration.Duration;
 
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -61,6 +66,8 @@ public class ShardManager extends AbstractUntypedActor {
 
     private final Configuration configuration;
 
+    private ShardManagerInfoMBean mBean;
+
     /**
      * @param type defines the kind of data that goes into shards created by this shard manager. Examples of type would be
      *             configuration or operational
@@ -82,6 +89,11 @@ public class ShardManager extends AbstractUntypedActor {
     public static Props props(final String type,
         final ClusterWrapper cluster,
         final Configuration configuration) {
+
+        Preconditions.checkNotNull(type, "type should not be null");
+        Preconditions.checkNotNull(cluster, "cluster should not be null");
+        Preconditions.checkNotNull(configuration, "configuration should not be null");
+
         return Props.create(new Creator<ShardManager>() {
 
             @Override
@@ -108,7 +120,7 @@ public class ShardManager extends AbstractUntypedActor {
         } else if(message instanceof ClusterEvent.UnreachableMember) {
             ignoreMessage(message);
         } else{
-          throw new Exception ("Not recognized message received, message="+message);
+            unknownMessage(message);
         }
 
     }
@@ -122,11 +134,8 @@ public class ShardManager extends AbstractUntypedActor {
             return;
         }
 
-        getSender().tell(new LocalShardNotFound(message.getShardName()), getSelf());
-    }
-
-    private void ignoreMessage(Object message){
-        LOG.debug("Unhandled message : " + message);
+        getSender().tell(new LocalShardNotFound(message.getShardName()),
+            getSelf());
     }
 
     private void memberRemoved(ClusterEvent.MemberRemoved message) {
@@ -140,7 +149,7 @@ public class ShardManager extends AbstractUntypedActor {
 
         for(ShardInformation info : localShards.values()){
             String shardName = info.getShardName();
-            info.updatePeerAddress(getShardActorName(memberName, shardName),
+            info.updatePeerAddress(getShardIdentifier(memberName, shardName),
                 getShardActorPath(shardName, memberName));
         }
     }
@@ -159,9 +168,6 @@ public class ShardManager extends AbstractUntypedActor {
     private void findPrimary(FindPrimary message) {
         String shardName = message.getShardName();
 
-        List<String> members =
-            configuration.getMembersFromShardName(shardName);
-
         // First see if the there is a local replica for the shard
         ShardInformation info = localShards.get(shardName);
         if(info != null) {
@@ -175,6 +181,9 @@ public class ShardManager extends AbstractUntypedActor {
             }
         }
 
+        List<String> members =
+            configuration.getMembersFromShardName(shardName);
+
         if(cluster.getCurrentMemberName() != null) {
             members.remove(cluster.getCurrentMemberName());
         }
@@ -196,9 +205,13 @@ public class ShardManager extends AbstractUntypedActor {
     private String getShardActorPath(String shardName, String memberName) {
         Address address = memberNameToAddress.get(memberName);
         if(address != null) {
-            return address.toString() + "/user/shardmanager-" + this.type + "/"
-                + getShardActorName(
-                memberName, shardName);
+            StringBuilder builder = new StringBuilder();
+            builder.append(address.toString())
+                .append("/user/")
+                .append(ShardManagerIdentifier.builder().type(type).build().toString())
+                .append("/")
+                .append(getShardIdentifier(memberName, shardName));
+            return builder.toString();
         }
         return null;
     }
@@ -211,8 +224,8 @@ public class ShardManager extends AbstractUntypedActor {
      * @param shardName
      * @return
      */
-    private String getShardActorName(String memberName, String shardName){
-        return memberName + "-shard-" + shardName + "-" + this.type;
+    private ShardIdentifier getShardIdentifier(String memberName, String shardName){
+        return ShardIdentifier.builder().memberName(memberName).shardName(shardName).type(type).build();
     }
 
     /**
@@ -225,15 +238,20 @@ public class ShardManager extends AbstractUntypedActor {
         List<String> memberShardNames =
             this.configuration.getMemberShardNames(memberName);
 
+        List<String> localShardActorNames = new ArrayList<>();
         for(String shardName : memberShardNames){
-            String shardActorName = getShardActorName(memberName, shardName);
-            Map<String, String> peerAddresses = getPeerAddresses(shardName);
+            ShardIdentifier shardId = getShardIdentifier(memberName, shardName);
+            Map<ShardIdentifier, String> peerAddresses = getPeerAddresses(shardName);
             ActorRef actor = getContext()
-                .actorOf(Shard.props(shardActorName, peerAddresses),
-                    shardActorName);
+                .actorOf(Shard.props(shardId, peerAddresses),
+                    shardId.toString());
+            localShardActorNames.add(shardId.toString());
             localShards.put(shardName, new ShardInformation(shardName, actor, peerAddresses));
         }
 
+        mBean = ShardManagerInfo
+            .createShardManagerMBean("shard-manager-" + this.type, localShardActorNames);
+
     }
 
     /**
@@ -242,9 +260,9 @@ public class ShardManager extends AbstractUntypedActor {
      * @param shardName
      * @return
      */
-    private Map<String, String> getPeerAddresses(String shardName){
+    private Map<ShardIdentifier, String> getPeerAddresses(String shardName){
 
-        Map<String, String> peerAddresses = new HashMap<>();
+        Map<ShardIdentifier, String> peerAddresses = new HashMap<>();
 
         List<String> members =
             this.configuration.getMembersFromShardName(shardName);
@@ -253,16 +271,16 @@ public class ShardManager extends AbstractUntypedActor {
 
         for(String memberName : members){
             if(!currentMemberName.equals(memberName)){
-                String shardActorName = getShardActorName(memberName, shardName);
+                ShardIdentifier shardId = getShardIdentifier(memberName,
+                    shardName);
                 String path =
                     getShardActorPath(shardName, currentMemberName);
-                peerAddresses.put(shardActorName, path);
+                peerAddresses.put(shardId, path);
             }
         }
         return peerAddresses;
     }
 
-
     @Override
     public SupervisorStrategy supervisorStrategy() {
         return new OneForOneStrategy(10, Duration.create("1 minute"),
@@ -280,10 +298,10 @@ public class ShardManager extends AbstractUntypedActor {
         private final String shardName;
         private final ActorRef actor;
         private final ActorPath actorPath;
-        private final Map<String, String> peerAddresses;
+        private final Map<ShardIdentifier, String> peerAddresses;
 
         private ShardInformation(String shardName, ActorRef actor,
-            Map<String, String> peerAddresses) {
+            Map<ShardIdentifier, String> peerAddresses) {
             this.shardName = shardName;
             this.actor = actor;
             this.actorPath = actor.path();
@@ -302,16 +320,15 @@ public class ShardManager extends AbstractUntypedActor {
             return actorPath;
         }
 
-        public Map<String, String> getPeerAddresses() {
-            return peerAddresses;
-        }
-
-        public void updatePeerAddress(String peerId, String peerAddress){
-            LOG.info("updatePeerAddress for peer {} with address {}", peerId, peerAddress);
+        public void updatePeerAddress(ShardIdentifier peerId, String peerAddress){
+            LOG.info("updatePeerAddress for peer {} with address {}", peerId,
+                peerAddress);
             if(peerAddresses.containsKey(peerId)){
                 peerAddresses.put(peerId, peerAddress);
 
-                LOG.info("Sending PeerAddressResolved for peer {} with address {} to {}", peerId, peerAddress, actor.path());
+                LOG.debug(
+                    "Sending PeerAddressResolved for peer {} with address {} to {}",
+                    peerId, peerAddress, actor.path());
 
                 actor
                     .tell(new PeerAddressResolved(peerId, peerAddress),
@@ -321,3 +338,6 @@ public class ShardManager extends AbstractUntypedActor {
         }
     }
 }
+
+
+
index f78935b5e72b6c79070efe5b8cbd315fa535ac12..1328d466f34b6fff82a91b98379cd815b488155c 100644 (file)
@@ -16,6 +16,7 @@ import akka.event.Logging;
 import akka.event.LoggingAdapter;
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataExists;
 import org.opendaylight.controller.cluster.datastore.messages.ReadData;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
@@ -44,7 +45,9 @@ public class ShardReadTransaction extends ShardTransaction {
   @Override
   public void handleReceive(Object message) throws Exception {
     if (ReadData.SERIALIZABLE_CLASS.equals(message.getClass())) {
-      readData(transaction,ReadData.fromSerializable(message));
+        readData(transaction, ReadData.fromSerializable(message));
+    } else if(DataExists.SERIALIZABLE_CLASS.equals(message.getClass())) {
+        dataExists(transaction, DataExists.fromSerializable(message));
     } else {
       super.handleReceive(message);
     }
@@ -55,4 +58,9 @@ public class ShardReadTransaction extends ShardTransaction {
     getSelf().tell(PoisonPill.getInstance(), getSelf());
   }
 
+  //default scope test method to check if we get correct exception
+  void forUnitTestOnlyExplicitTransactionClose(){
+      transaction.close();
+  }
+
 }
index 6733bcfb9f6e323be99a849195bddac637f11588..97bb196f9fc36b9635e906bb654fafc68b2538c4 100644 (file)
@@ -16,6 +16,7 @@ import akka.event.Logging;
 import akka.event.LoggingAdapter;
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataExists;
 import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
 import org.opendaylight.controller.cluster.datastore.messages.MergeData;
 import org.opendaylight.controller.cluster.datastore.messages.ReadData;
@@ -55,6 +56,8 @@ public class ShardReadWriteTransaction extends ShardTransaction {
       deleteData(transaction,DeleteData.fromSerizalizable(message));
     } else if (ReadyTransaction.SERIALIZABLE_CLASS.equals(message.getClass())) {
       readyTransaction(transaction,new ReadyTransaction());
+    } else if(DataExists.SERIALIZABLE_CLASS.equals(message.getClass())) {
+        dataExists(transaction, DataExists.fromSerializable(message));
     }else {
       super.handleReceive(message);
     }
@@ -65,4 +68,13 @@ public class ShardReadWriteTransaction extends ShardTransaction {
     getSender().tell(new CloseTransactionReply().toSerializable(), getSelf());
     getSelf().tell(PoisonPill.getInstance(), getSelf());
   }
+
+    /**
+     * This method is intended for use in unit tests only; it closes the
+     * transaction explicitly so that failure cases can be exercised.
+     */
+    public void forUnitTestOnlyExplicitTransactionClose() {
+        transaction.close();
+    }
 }
index 3a916bda2c6e163d75c4d39a0c4d80b94b823fb2..360a10722c06df2c3e357631b43d89eac801a3d3 100644 (file)
@@ -14,8 +14,11 @@ import akka.event.Logging;
 import akka.event.LoggingAdapter;
 import akka.japi.Creator;
 import com.google.common.base.Optional;
-import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.CheckedFuture;
+import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.DataExists;
+import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
 import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
 import org.opendaylight.controller.cluster.datastore.messages.DeleteDataReply;
 import org.opendaylight.controller.cluster.datastore.messages.MergeData;
@@ -32,6 +35,7 @@ import org.opendaylight.controller.cluster.datastore.modification.ImmutableCompo
 import org.opendaylight.controller.cluster.datastore.modification.MergeModification;
 import org.opendaylight.controller.cluster.datastore.modification.MutableCompositeModification;
 import org.opendaylight.controller.cluster.datastore.modification.WriteModification;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
@@ -41,8 +45,6 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
-import java.util.concurrent.ExecutionException;
-
 /**
  * The ShardTransaction Actor represents a remote transaction
  * <p>
@@ -90,7 +92,6 @@ public abstract class ShardTransaction extends AbstractUntypedActor {
   protected ShardTransaction(DOMStoreTransactionChain transactionChain,
                           ActorRef shardActor, SchemaContext schemaContext) {
     this.transactionChain = transactionChain;
-    //this.transaction = transaction;
     this.shardActor = shardActor;
     this.schemaContext = schemaContext;
   }
@@ -174,7 +175,7 @@ public abstract class ShardTransaction extends AbstractUntypedActor {
       getSender().tell(new GetCompositeModificationReply(
           new ImmutableCompositeModification(modification)), getSelf());
     }else{
-      throw new Exception ("ShardTransaction:handleRecieve received an unknown message"+message);
+         throw new UnknownMessageException(message);
     }
   }
 
@@ -184,50 +185,73 @@ public abstract class ShardTransaction extends AbstractUntypedActor {
     final ActorRef sender = getSender();
     final ActorRef self = getSelf();
     final YangInstanceIdentifier path = message.getPath();
-    final ListenableFuture<Optional<NormalizedNode<?, ?>>> future =
-        transaction.read(path);
+    final CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> future =
+          transaction.read(path);
 
-    future.addListener(new Runnable() {
+      future.addListener(new Runnable() {
       @Override
       public void run() {
         try {
-          Optional<NormalizedNode<?, ?>> optional = future.get();
+          Optional<NormalizedNode<?, ?>> optional = future.checkedGet();
           if (optional.isPresent()) {
             sender.tell(new ReadDataReply(schemaContext,optional.get()).toSerializable(), self);
           } else {
             sender.tell(new ReadDataReply(schemaContext,null).toSerializable(), self);
           }
-        } catch (InterruptedException | ExecutionException e) {
-          log.error(e,
-              "An exception happened when reading data from path : "
-                  + path.toString());
+        } catch (Exception e) {
+            sender.tell(new akka.actor.Status.Failure(e),self);
         }
 
       }
     }, getContext().dispatcher());
   }
 
+    protected void dataExists(DOMStoreReadTransaction transaction, DataExists message) {
+        final YangInstanceIdentifier path = message.getPath();
+
+        try {
+            Boolean exists = transaction.exists(path).checkedGet();
+            getSender().tell(new DataExistsReply(exists).toSerializable(), getSelf());
+        } catch (ReadFailedException e) {
+            getSender().tell(new akka.actor.Status.Failure(e),getSelf());
+        }
+
+    }
 
   protected void writeData(DOMStoreWriteTransaction transaction, WriteData message) {
     modification.addModification(
         new WriteModification(message.getPath(), message.getData(),schemaContext));
     LOG.debug("writeData at path : " + message.getPath().toString());
-    transaction.write(message.getPath(), message.getData());
-    getSender().tell(new WriteDataReply().toSerializable(), getSelf());
+
+    try {
+        transaction.write(message.getPath(), message.getData());
+        getSender().tell(new WriteDataReply().toSerializable(), getSelf());
+    } catch (Exception e) {
+        getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+    }
   }
 
   protected void mergeData(DOMStoreWriteTransaction transaction, MergeData message) {
     modification.addModification(
         new MergeModification(message.getPath(), message.getData(), schemaContext));
     LOG.debug("mergeData at path : " + message.getPath().toString());
-    transaction.merge(message.getPath(), message.getData());
-    getSender().tell(new MergeDataReply().toSerializable(), getSelf());
+    try {
+        transaction.merge(message.getPath(), message.getData());
+        getSender().tell(new MergeDataReply().toSerializable(), getSelf());
+    } catch (Exception e) {
+        getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+    }
   }
 
   protected void deleteData(DOMStoreWriteTransaction transaction, DeleteData message) {
+    LOG.debug("deleteData at path : " + message.getPath().toString());
     modification.addModification(new DeleteModification(message.getPath()));
-    transaction.delete(message.getPath());
-    getSender().tell(new DeleteDataReply().toSerializable(), getSelf());
+    try {
+        transaction.delete(message.getPath());
+        getSender().tell(new DeleteDataReply().toSerializable(), getSelf());
+    } catch (Exception e) {
+        getSender().tell(new akka.actor.Status.Failure(e), getSelf());
+    }
   }
 
   protected void readyTransaction(DOMStoreWriteTransaction transaction, ReadyTransaction message) {
index ce63f1107daafe31855c67c4eec4d676e94f384a..c508255ea490ee09b370dae8a49320495166c820 100644 (file)
@@ -40,23 +40,27 @@ public class ShardTransactionChain extends AbstractUntypedActor {
             chain.close();
             getSender().tell(new CloseTransactionChainReply().toSerializable(), getSelf());
         }else{
-          throw new Exception("Not recognized message recieved="+message);
+            unknownMessage(message);
         }
     }
 
+    private ActorRef getShardActor(){
+        return getContext().parent();
+    }
+
   private ActorRef createTypedTransactionActor(CreateTransaction createTransaction,String transactionId){
     if(createTransaction.getTransactionType()== TransactionProxy.TransactionType.READ_ONLY.ordinal()){
       return getContext().actorOf(
-          ShardTransaction.props( chain.newReadOnlyTransaction(), getSelf(), schemaContext), transactionId);
+          ShardTransaction.props( chain.newReadOnlyTransaction(), getShardActor(), schemaContext), transactionId);
 
     }else if (createTransaction.getTransactionType()== TransactionProxy.TransactionType.READ_WRITE.ordinal()){
       return getContext().actorOf(
-          ShardTransaction.props( chain.newReadWriteTransaction(), getSelf(), schemaContext), transactionId);
+          ShardTransaction.props( chain.newReadWriteTransaction(), getShardActor(), schemaContext), transactionId);
 
 
     }else if (createTransaction.getTransactionType()== TransactionProxy.TransactionType.WRITE_ONLY.ordinal()){
       return getContext().actorOf(
-          ShardTransaction.props( chain.newWriteOnlyTransaction(), getSelf(), schemaContext), transactionId);
+          ShardTransaction.props( chain.newWriteOnlyTransaction(), getShardActor(), schemaContext), transactionId);
     }else{
       throw new IllegalArgumentException ("CreateTransaction message has unidentified transaction type="+createTransaction.getTransactionType()) ;
     }
index 2a5429ba81c4b0025e0799631da1ba914956ac3c..91e578b46d1f6de8f2e841f7d7f459541a9c3bdb 100644 (file)
@@ -63,4 +63,13 @@ public class ShardWriteTransaction extends ShardTransaction {
     getSender().tell(new CloseTransactionReply().toSerializable(), getSelf());
     getSelf().tell(PoisonPill.getInstance(), getSelf());
   }
+
+    /**
+     * This method is intended for use in unit tests only; it closes the
+     * transaction explicitly so that failure cases can be exercised.
+     */
+    public void forUnitTestOnlyExplicitTransactionClose() {
+        transaction.close();
+    }
 }
index a8deb0153a400eefbf28fd4269892c86ea61ef35..500b73ce9de6531c3a1d60df3e192dea18dc4606 100644 (file)
@@ -67,7 +67,7 @@ public class ThreePhaseCommitCohort extends AbstractUntypedActor {
         } else if (message.getClass().equals(AbortTransaction.SERIALIZABLE_CLASS)) {
             abort(new AbortTransaction());
         } else {
-          throw new Exception ("Not recognized message received,message="+message);
+            unknownMessage(message);
         }
     }
 
@@ -130,7 +130,7 @@ public class ThreePhaseCommitCohort extends AbstractUntypedActor {
                     Boolean canCommit = future.get();
                     sender.tell(new CanCommitTransactionReply(canCommit).toSerializable(), self);
                 } catch (InterruptedException | ExecutionException e) {
-                    log.error(e, "An exception happened when aborting");
+                    log.error(e, "An exception happened when checking canCommit");
                 }
             }
         }, getContext().dispatcher());
index 915b13dd8bc234a6cbf898658b8e6479333b36c2..5b447943ea7fd798e5572e55483ff8b4fcf7e331 100644 (file)
@@ -59,17 +59,22 @@ public class ThreePhaseCommitCohortProxy implements
     }
 
     @Override public ListenableFuture<Boolean> canCommit() {
+        LOG.debug("txn {} canCommit", transactionId);
         Callable<Boolean> call = new Callable<Boolean>() {
 
             @Override
             public Boolean call() throws Exception {
                 for(ActorPath actorPath : cohortPaths){
+
+                    Object message = new CanCommitTransaction().toSerializable();
+                    LOG.debug("txn {} Sending {} to {}", transactionId, message, actorPath);
+
                     ActorSelection cohort = actorContext.actorSelection(actorPath);
 
                     try {
                         Object response =
                                 actorContext.executeRemoteOperation(cohort,
-                                        new CanCommitTransaction().toSerializable(),
+                                        message,
                                         ActorContext.ASK_DURATION);
 
                         if (response.getClass().equals(CanCommitTransactionReply.SERIALIZABLE_CLASS)) {
@@ -80,6 +85,7 @@ public class ThreePhaseCommitCohortProxy implements
                             }
                         }
                     } catch(RuntimeException e){
+                        // FIXME : Need to properly handle this
                         LOG.error("Unexpected Exception", e);
                         return false;
                     }
@@ -93,14 +99,17 @@ public class ThreePhaseCommitCohortProxy implements
     }
 
     @Override public ListenableFuture<Void> preCommit() {
+        LOG.debug("txn {} preCommit", transactionId);
         return voidOperation(new PreCommitTransaction().toSerializable(), PreCommitTransactionReply.SERIALIZABLE_CLASS);
     }
 
     @Override public ListenableFuture<Void> abort() {
+        LOG.debug("txn {} abort", transactionId);
         return voidOperation(new AbortTransaction().toSerializable(), AbortTransactionReply.SERIALIZABLE_CLASS);
     }
 
     @Override public ListenableFuture<Void> commit() {
+        LOG.debug("txn {} commit", transactionId);
         return voidOperation(new CommitTransaction().toSerializable(), CommitTransactionReply.SERIALIZABLE_CLASS);
     }
 
@@ -111,6 +120,8 @@ public class ThreePhaseCommitCohortProxy implements
                 for(ActorPath actorPath : cohortPaths){
                     ActorSelection cohort = actorContext.actorSelection(actorPath);
 
+                    LOG.debug("txn {} Sending {} to {}", transactionId, message, actorPath);
+
                     try {
                         Object response =
                             actorContext.executeRemoteOperation(cohort,
index fa98905a66968b111372d4b893ef35bef6e32bd4..95862ae9d93670de67dbdd8bd99074a9ba49e8b8 100644 (file)
@@ -13,14 +13,18 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.Props;
 import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListeningExecutorService;
 import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
 import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
+import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataExists;
+import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
 import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
 import org.opendaylight.controller.cluster.datastore.messages.MergeData;
 import org.opendaylight.controller.cluster.datastore.messages.ReadData;
@@ -75,7 +79,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
     private final TransactionType transactionType;
     private final ActorContext actorContext;
     private final Map<String, TransactionContext> remoteTransactionPaths = new HashMap<>();
-    private final String identifier;
+    private final TransactionIdentifier identifier;
     private final ListeningExecutorService executor;
     private final SchemaContext schemaContext;
 
@@ -85,13 +89,18 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         ListeningExecutorService executor,
         SchemaContext schemaContext
     ) {
+        this.actorContext = Preconditions.checkNotNull(actorContext, "actorContext should not be null");
+        this.transactionType = Preconditions.checkNotNull(transactionType, "transactionType should not be null");
+        this.executor = Preconditions.checkNotNull(executor, "executor should not be null");
+        this.schemaContext = Preconditions.checkNotNull(schemaContext, "schemaContext should not be null");
+
+        String memberName = actorContext.getCurrentMemberName();
+        if(memberName == null){
+            memberName = "UNKNOWN-MEMBER";
+        }
+        this.identifier = TransactionIdentifier.builder().memberName(memberName).counter(counter.getAndIncrement()).build();
 
-        this.identifier = actorContext.getCurrentMemberName() + "-txn-" + counter.getAndIncrement();
-        this.transactionType = transactionType;
-        this.actorContext = actorContext;
-        this.executor = executor;
-        this.schemaContext = schemaContext;
-
+        LOG.debug("Created txn {}", identifier);
 
     }
 
@@ -99,14 +108,27 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
     public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> read(
             final YangInstanceIdentifier path) {
 
+        LOG.debug("txn {} read {}", identifier, path);
+
         createTransactionIfMissing(actorContext, path);
 
         return transactionContext(path).readData(path);
     }
 
+    @Override public CheckedFuture<Boolean, ReadFailedException> exists(
+        YangInstanceIdentifier path) {
+        LOG.debug("txn {} exists {}", identifier, path);
+
+        createTransactionIfMissing(actorContext, path);
+
+        return transactionContext(path).dataExists(path);
+    }
+
     @Override
     public void write(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
 
+        LOG.debug("txn {} write {}", identifier, path);
+
         createTransactionIfMissing(actorContext, path);
 
         transactionContext(path).writeData(path, data);
@@ -115,6 +137,8 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
     @Override
     public void merge(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
 
+        LOG.debug("txn {} merge {}", identifier, path);
+
         createTransactionIfMissing(actorContext, path);
 
         transactionContext(path).mergeData(path, data);
@@ -123,6 +147,8 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
     @Override
     public void delete(YangInstanceIdentifier path) {
 
+        LOG.debug("txn {} delete {}", identifier, path);
+
         createTransactionIfMissing(actorContext, path);
 
         transactionContext(path).deleteData(path);
@@ -132,7 +158,12 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
     public DOMStoreThreePhaseCommitCohort ready() {
         List<ActorPath> cohortPaths = new ArrayList<>();
 
+        LOG.debug("txn {} Trying to get {} transactions ready for commit", identifier, remoteTransactionPaths.size());
+
         for(TransactionContext transactionContext : remoteTransactionPaths.values()) {
+
+            LOG.debug("txn {} Readying transaction for shard {}", identifier, transactionContext.getShardName());
+
             Object result = transactionContext.readyTransaction();
 
             if(result.getClass().equals(ReadyTransactionReply.SERIALIZABLE_CLASS)){
@@ -143,7 +174,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             }
         }
 
-        return new ThreePhaseCommitCohortProxy(actorContext, cohortPaths, identifier, executor);
+        return new ThreePhaseCommitCohortProxy(actorContext, cohortPaths, identifier.toString(), executor);
     }
 
     @Override
@@ -180,7 +211,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         try {
             Object response = actorContext.executeShardOperation(shardName,
-                new CreateTransaction(identifier,this.transactionType.ordinal() ).toSerializable(),
+                new CreateTransaction(identifier.toString(),this.transactionType.ordinal() ).toSerializable(),
                 ActorContext.ASK_DURATION);
             if (response.getClass()
                 .equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
@@ -189,7 +220,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
                 String transactionPath = reply.getTransactionPath();
 
-                LOG.info("Received transaction path = {}"  , transactionPath );
+                LOG.debug("txn {} Received transaction path = {}", identifier, transactionPath);
 
                 ActorSelection transactionActor =
                     actorContext.actorSelection(transactionPath);
@@ -200,7 +231,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
                 remoteTransactionPaths.put(shardName, transactionContext);
             }
         } catch(TimeoutException | PrimaryNotFoundException e){
-            LOG.error("Creating NoOpTransaction because of : {}", e.getMessage());
+            LOG.error("txn {} Creating NoOpTransaction because of : {}", identifier, e.getMessage());
             remoteTransactionPaths.put(shardName,
                 new NoOpTransactionContext(shardName));
         }
@@ -223,13 +254,15 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
                 final YangInstanceIdentifier path);
 
         void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data);
+
+        CheckedFuture<Boolean, ReadFailedException> dataExists(YangInstanceIdentifier path);
     }
 
 
-    private class TransactionContextImpl implements TransactionContext{
+    private class TransactionContextImpl implements TransactionContext {
         private final String shardName;
         private final String actorPath;
-        private final ActorSelection  actor;
+        private final ActorSelection actor;
 
 
         private TransactionContextImpl(String shardName, String actorPath,
@@ -247,7 +280,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             return actor;
         }
 
-        @Override public String getResolvedCohortPath(String cohortPath){
+        @Override public String getResolvedCohortPath(String cohortPath) {
             return actorContext.resolvePath(actorPath, cohortPath);
         }
 
@@ -268,38 +301,76 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             getActor().tell(new DeleteData(path).toSerializable(), null);
         }
 
-        @Override public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data){
-            getActor().tell(new MergeData(path, data, schemaContext).toSerializable(), null);
+        @Override public void mergeData(YangInstanceIdentifier path,
+            NormalizedNode<?, ?> data) {
+            getActor()
+                .tell(new MergeData(path, data, schemaContext).toSerializable(),
+                    null);
         }
 
-        @Override public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
-                final YangInstanceIdentifier path) {
-
-            Callable<Optional<NormalizedNode<?,?>>> call = new Callable<Optional<NormalizedNode<?,?>>>() {
+        @Override
+        public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
+            final YangInstanceIdentifier path) {
 
-                @Override public Optional<NormalizedNode<?,?>> call() throws Exception {
-                    Object response = actorContext
-                        .executeRemoteOperation(getActor(), new ReadData(path).toSerializable(),
-                            ActorContext.ASK_DURATION);
-                    if(response.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)){
-                        ReadDataReply reply = ReadDataReply.fromSerializable(schemaContext,path, response);
-                        if(reply.getNormalizedNode() == null){
-                            return Optional.absent();
+            Callable<Optional<NormalizedNode<?, ?>>> call =
+                new Callable<Optional<NormalizedNode<?, ?>>>() {
+
+                    @Override public Optional<NormalizedNode<?, ?>> call()
+                        throws Exception {
+                        Object response = actorContext
+                            .executeRemoteOperation(getActor(),
+                                new ReadData(path).toSerializable(),
+                                ActorContext.ASK_DURATION);
+                        if (response.getClass()
+                            .equals(ReadDataReply.SERIALIZABLE_CLASS)) {
+                            ReadDataReply reply = ReadDataReply
+                                .fromSerializable(schemaContext, path,
+                                    response);
+                            if (reply.getNormalizedNode() == null) {
+                                return Optional.absent();
+                            }
+                            return Optional.<NormalizedNode<?, ?>>of(
+                                reply.getNormalizedNode());
                         }
-                        return Optional.<NormalizedNode<?,?>>of(reply.getNormalizedNode());
-                    }
 
-                    return Optional.absent();
-                }
-            };
+                        throw new ReadFailedException("Read Failed " + path);
+                    }
+                };
 
-            return MappingCheckedFuture.create(executor.submit(call), ReadFailedException.MAPPER);
+            return MappingCheckedFuture
+                .create(executor.submit(call), ReadFailedException.MAPPER);
         }
 
-        @Override public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-            getActor().tell(new WriteData(path, data, schemaContext).toSerializable(), null);
+        @Override public void writeData(YangInstanceIdentifier path,
+            NormalizedNode<?, ?> data) {
+            getActor()
+                .tell(new WriteData(path, data, schemaContext).toSerializable(),
+                    null);
         }
 
+        @Override public CheckedFuture<Boolean, ReadFailedException> dataExists(
+            final YangInstanceIdentifier path) {
+
+            Callable<Boolean> call = new Callable<Boolean>() {
+
+                @Override public Boolean call() throws Exception {
+                    Object o = actorContext.executeRemoteOperation(getActor(),
+                        new DataExists(path).toSerializable(),
+                        ActorContext.ASK_DURATION
+                    );
+
+
+                    if (DataExistsReply.SERIALIZABLE_CLASS
+                        .equals(o.getClass())) {
+                        return DataExistsReply.fromSerializable(o).exists();
+                    }
+
+                    throw new ReadFailedException("Exists Failed " + path);
+                }
+            };
+            return MappingCheckedFuture
+                .create(executor.submit(call), ReadFailedException.MAPPER);
+        }
     }
 
     private class NoOpTransactionContext implements TransactionContext {
@@ -324,35 +395,44 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         }
 
         @Override public void closeTransaction() {
-            LOG.error("closeTransaction called");
+            LOG.warn("txn {} closeTransaction called", identifier);
         }
 
         @Override public Object readyTransaction() {
-            LOG.error("readyTransaction called");
+            LOG.warn("txn {} readyTransaction called", identifier);
             cohort = actorContext.getActorSystem().actorOf(Props.create(NoOpCohort.class));
             return new ReadyTransactionReply(cohort.path()).toSerializable();
         }
 
         @Override public void deleteData(YangInstanceIdentifier path) {
-            LOG.error("deleteData called path = {}", path);
+            LOG.warn("txt {} deleteData called path = {}", identifier, path);
         }
 
         @Override public void mergeData(YangInstanceIdentifier path,
             NormalizedNode<?, ?> data) {
-            LOG.error("mergeData called path = {}", path);
+            LOG.warn("txn {} mergeData called path = {}", identifier, path);
         }
 
         @Override
         public CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException> readData(
             YangInstanceIdentifier path) {
-            LOG.error("readData called path = {}", path);
+            LOG.warn("txn {} readData called path = {}", identifier, path);
             return Futures.immediateCheckedFuture(
                 Optional.<NormalizedNode<?, ?>>absent());
         }
 
         @Override public void writeData(YangInstanceIdentifier path,
             NormalizedNode<?, ?> data) {
-            LOG.error("writeData called path = {}", path);
+            LOG.warn("txn {} writeData called path = {}", identifier, path);
+        }
+
+        @Override public CheckedFuture<Boolean, ReadFailedException> dataExists(
+            YangInstanceIdentifier path) {
+            LOG.warn("txn {} dataExists called path = {}", identifier, path);
+
+            // Returning false instead of an exception to keep this aligned with
+            // read
+            return Futures.immediateCheckedFuture(false);
         }
     }
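
For illustration only, and not part of the patch: a caller-side view of the new exists() support that TransactionProxy gains above, written against the DOMStoreReadTransaction interface. The helper name pathExists and the way the transaction and path are obtained are assumptions.

    import com.google.common.util.concurrent.CheckedFuture;
    import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

    public final class ExistsCheckSketch {
        // Hypothetical helper: blocks on the CheckedFuture returned by exists()
        // and surfaces failures as ReadFailedException, mirroring readData().
        static boolean pathExists(DOMStoreReadTransaction tx, YangInstanceIdentifier path)
                throws ReadFailedException {
            CheckedFuture<Boolean, ReadFailedException> future = tx.exists(path);
            return future.checkedGet();
        }
    }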
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/exceptions/UnknownMessageException.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/exceptions/UnknownMessageException.java
new file mode 100644 (file)
index 0000000..f4f2524
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.exceptions;
+
+public class UnknownMessageException extends Exception {
+    private final Object message;
+
+    public UnknownMessageException(Object message) {
+        this.message = message;
+    }
+
+    @Override public String getMessage() {
+        return "Unknown message received " + " - " + message;
+    }
+}
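
For illustration only, and not part of the patch: a minimal sketch of how the new exception reads when caught. The message object "NotARealMessage" is made up.

    import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;

    public final class UnknownMessageSketch {
        public static void main(String[] args) {
            try {
                // Hypothetical unrecognized message object
                throw new UnknownMessageException("NotARealMessage");
            } catch (UnknownMessageException e) {
                // getMessage() embeds the offending message's toString()
                System.out.println(e.getMessage());
            }
        }
    }
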
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardIdentifier.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardIdentifier.java
new file mode 100644 (file)
index 0000000..c692881
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.identifiers;
+
+import com.google.common.base.Preconditions;
+
+public class ShardIdentifier {
+    private final String shardName;
+    private final String memberName;
+    private final String type;
+
+
+    public ShardIdentifier(String shardName, String memberName, String type) {
+
+        Preconditions.checkNotNull(shardName, "shardName should not be null");
+        Preconditions.checkNotNull(memberName, "memberName should not be null");
+        Preconditions.checkNotNull(type, "type should not be null");
+
+        this.shardName = shardName;
+        this.memberName = memberName;
+        this.type = type;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        ShardIdentifier that = (ShardIdentifier) o;
+
+        if (!memberName.equals(that.memberName)) {
+            return false;
+        }
+        if (!shardName.equals(that.shardName)) {
+            return false;
+        }
+        if (!type.equals(that.type)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = shardName.hashCode();
+        result = 31 * result + memberName.hashCode();
+        result = 31 * result + type.hashCode();
+        return result;
+    }
+
+    @Override public String toString() {
+        StringBuilder builder = new StringBuilder();
+        builder.append(memberName).append("-shard-").append(shardName).append("-").append(type);
+        return builder.toString();
+    }
+
+    public static Builder builder(){
+        return new Builder();
+    }
+
+    public static class Builder {
+        private String shardName;
+        private String memberName;
+        private String type;
+
+        public ShardIdentifier build(){
+            return new ShardIdentifier(shardName, memberName, type);
+        }
+
+        public Builder shardName(String shardName){
+            this.shardName = shardName;
+            return this;
+        }
+
+        public Builder memberName(String memberName){
+            this.memberName = memberName;
+            return this;
+        }
+
+        public Builder type(String type){
+            this.type = type;
+            return this;
+        }
+
+    }
+}
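
For illustration only, and not part of the patch: the builder usage that ShardManager.getShardIdentifier() now relies on. The member name, shard name, and "config" type are made-up example values.

    import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;

    public final class ShardIdentifierSketch {
        public static void main(String[] args) {
            ShardIdentifier id = ShardIdentifier.builder()
                .memberName("member-1")   // hypothetical cluster member
                .shardName("inventory")   // hypothetical shard name
                .type("config")           // hypothetical datastore type
                .build();

            // Prints "member-1-shard-inventory-config", the string ShardManager now
            // uses as the shard actor name and as the key of the peer-address map.
            System.out.println(id);
        }
    }
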
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardManagerIdentifier.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardManagerIdentifier.java
new file mode 100644 (file)
index 0000000..65bf010
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.identifiers;
+
+public class ShardManagerIdentifier {
+    private final String type;
+
+    public ShardManagerIdentifier(String type) {
+        this.type = type;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        ShardManagerIdentifier that = (ShardManagerIdentifier) o;
+
+        if (!type.equals(that.type)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return type.hashCode();
+    }
+
+    @Override public String toString() {
+        StringBuilder builder = new StringBuilder();
+        builder.append("shardmanager-").append(type);
+        return builder.toString();
+    }
+
+    public static Builder builder(){
+        return new Builder();
+    }
+
+    public static class Builder {
+        private String type;
+
+        public Builder type(String type){
+            this.type = type;
+            return this;
+        }
+
+        public ShardManagerIdentifier build(){
+            return new ShardManagerIdentifier(this.type);
+        }
+
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardTransactionIdentifier.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardTransactionIdentifier.java
new file mode 100644 (file)
index 0000000..77e8142
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.identifiers;
+
+import com.google.common.base.Preconditions;
+
+public class ShardTransactionIdentifier {
+    private final String remoteTransactionId;
+
+    public ShardTransactionIdentifier(String remoteTransactionId) {
+        this.remoteTransactionId = Preconditions.checkNotNull(remoteTransactionId, "remoteTransactionId should not be null");
+    }
+
+    public static Builder builder(){
+        return new Builder();
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        ShardTransactionIdentifier that = (ShardTransactionIdentifier) o;
+
+        if (!remoteTransactionId.equals(that.remoteTransactionId)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return remoteTransactionId.hashCode();
+    }
+
+    @Override public String toString() {
+        final StringBuilder sb =
+            new StringBuilder();
+        sb.append("shard-").append(remoteTransactionId);
+        return sb.toString();
+    }
+
+    public static class Builder {
+        private String remoteTransactionId;
+
+        public Builder remoteTransactionId(String remoteTransactionId){
+            this.remoteTransactionId = remoteTransactionId;
+            return this;
+        }
+
+        public ShardTransactionIdentifier build(){
+            return new ShardTransactionIdentifier(remoteTransactionId);
+        }
+
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/TransactionIdentifier.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/identifiers/TransactionIdentifier.java
new file mode 100644 (file)
index 0000000..ba2e27c
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.identifiers;
+
+import com.google.common.base.Preconditions;
+
+public class TransactionIdentifier {
+    private final String memberName;
+    private final long counter;
+
+
+    public TransactionIdentifier(String memberName, long counter) {
+        this.memberName = Preconditions.checkNotNull(memberName, "memberName should not be null");
+        this.counter = counter;
+    }
+
+    public static Builder builder(){
+        return new Builder();
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        TransactionIdentifier that = (TransactionIdentifier) o;
+
+        if (counter != that.counter) {
+            return false;
+        }
+        if (!memberName.equals(that.memberName)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = memberName.hashCode();
+        result = 31 * result + (int) (counter ^ (counter >>> 32));
+        return result;
+    }
+
+    @Override public String toString() {
+        final StringBuilder sb =
+            new StringBuilder();
+        sb.append(memberName).append("-txn-").append(counter);
+        return sb.toString();
+    }
+
+    public static class Builder {
+        private String memberName;
+        private long counter;
+
+        public TransactionIdentifier build(){
+            return new TransactionIdentifier(memberName, counter);
+        }
+
+        public Builder memberName(String memberName){
+            this.memberName = memberName;
+            return this;
+        }
+
+        public Builder counter(long counter){
+            this.counter = counter;
+            return this;
+        }
+    }
+}
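TransactionIdentifier composes the same way; a short sketch (values are placeholders, the counter would normally come from a per-member sequence):

    // Illustrative only.
    TransactionIdentifier txId = TransactionIdentifier.builder()
        .memberName("member-1")
        .counter(5)
        .build();
    // txId.toString() yields "member-1-txn-5"; equals() and hashCode() use
    // both fields, so the same counter on different members never collides.
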
index de1ac18533ba0d85d357d8c450c568237b918d7a..a5d7b77a647df5e16fe3261e0b346b4bde4af0f5 100644 (file)
@@ -34,6 +34,7 @@ public abstract class AbstractBaseMBean {
   public static String BASE_JMX_PREFIX = "org.opendaylight.controller:";
   public static String JMX_TYPE_DISTRIBUTED_DATASTORE = "DistributedDatastore";
   public static String JMX_CATEGORY_SHARD = "Shard";
+  public static String JMX_CATEGORY_SHARD_MANAGER = "ShardManager";
 
   private static final Logger LOG = LoggerFactory
       .getLogger(AbstractBaseMBean.class);
index a3359086b6efdc6eea954eeb6168eb1edf906ff8..afca87f0df6458a041b8d2219e3e578066513481 100644 (file)
@@ -8,19 +8,20 @@ import java.util.Map;
  * Date: 7/16/14
  */
 public class ShardMBeanFactory {
-  private static Map<String,ShardStats> shardMBeans= new HashMap<String,ShardStats>();
+    private static Map<String, ShardStats> shardMBeans =
+        new HashMap<String, ShardStats>();
 
-  public static ShardStats getShardStatsMBean(String shardName){
-       if(shardMBeans.containsKey(shardName)){
+    public static ShardStats getShardStatsMBean(String shardName) {
+        if (shardMBeans.containsKey(shardName)) {
             return shardMBeans.get(shardName);
-       }else {
-         ShardStats shardStatsMBeanImpl = new ShardStats(shardName);
+        } else {
+            ShardStats shardStatsMBeanImpl = new ShardStats(shardName);
 
-         if(shardStatsMBeanImpl.registerMBean()) {
-           shardMBeans.put(shardName, shardStatsMBeanImpl);
-         }
-         return shardStatsMBeanImpl;
-       }
-  }
+            if (shardStatsMBeanImpl.registerMBean()) {
+                shardMBeans.put(shardName, shardStatsMBeanImpl);
+            }
+            return shardStatsMBeanImpl;
+        }
+    }
 
 }
index 4eb6a8cef96319d0ca885112c3a0812ee06e5431..c6c1579ce336dc78e64ecc6b79042c30eee28148 100644 (file)
@@ -2,85 +2,177 @@ package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
 
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.AbstractBaseMBean;
 
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
 /**
  * @author: syedbahm
  */
 public class ShardStats extends AbstractBaseMBean implements ShardStatsMBean {
-  private  Long committedTransactionsCount;
-  private Long journalMessagesCount;
-  final private String shardName;
-  private String leader;
-  private String raftState;
 
-  ShardStats(String shardName){
-    this.shardName = shardName;
-    committedTransactionsCount =0L;
-    journalMessagesCount = 0L;
-  };
+    private final String shardName;
+
+    private Long committedTransactionsCount = 0L;
+
+    private Long readOnlyTransactionCount = 0L;
+
+    private Long writeOnlyTransactionCount = 0L;
+
+    private Long readWriteTransactionCount = 0L;
+
+    private String leader;
+
+    private String raftState;
+
+    private Long lastLogTerm = -1L;
+
+    private Long lastLogIndex = -1L;
+
+    private Long currentTerm = -1L;
+
+    private Long commitIndex = -1L;
+
+    private Long lastApplied = -1L;
+
+    private Date lastCommittedTransactionTime = new Date(0L);
+
+    private Long failedTransactionsCount = 0L;
+
+    private SimpleDateFormat sdf =
+        new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
+
+    ShardStats(String shardName) {
+        this.shardName = shardName;
+    }
+
+
+    @Override
+    public String getShardName() {
+        return shardName;
+    }
+
+    @Override
+    public Long getCommittedTransactionsCount() {
+        return committedTransactionsCount;
+    }
+
+    @Override public String getLeader() {
+        return leader;
+    }
+
+    @Override public String getRaftState() {
+        return raftState;
+    }
+
+    @Override public Long getReadOnlyTransactionCount() {
+        return readOnlyTransactionCount;
+    }
+
+    @Override public Long getWriteOnlyTransactionCount() {
+        return writeOnlyTransactionCount;
+    }
+
+    @Override public Long getReadWriteTransactionCount() {
+        return readWriteTransactionCount;
+    }
+
+    @Override public Long getLastLogIndex() {
+        return lastLogIndex;
+    }
+
+    @Override public Long getLastLogTerm() {
+        return lastLogTerm;
+    }
+
+    @Override public Long getCurrentTerm() {
+        return currentTerm;
+    }
+
+    @Override public Long getCommitIndex() {
+        return commitIndex;
+    }
+
+    @Override public Long getLastApplied() {
+        return lastApplied;
+    }
+
+    @Override
+    public String getLastCommittedTransactionTime() {
 
+        return sdf.format(lastCommittedTransactionTime);
+    }
 
-  @Override
-  public String getShardName() {
-    return shardName;
-  }
+    @Override public Long getFailedTransactionsCount() {
+        return failedTransactionsCount;
+    }
 
-  @Override
-  public Long getCommittedTransactionsCount() {
-    return committedTransactionsCount;
-  }
+    public Long incrementCommittedTransactionCount() {
+        return ++committedTransactionsCount;
+    }
 
-  @Override
-  public Long getJournalMessagesCount() {
-    //FIXME: this will be populated once after integration with Raft stuff
-    return journalMessagesCount;
-  }
+    public Long incrementReadOnlyTransactionCount() {
+        return ++readOnlyTransactionCount;
+    }
 
-  @Override public String getLeader() {
-    return leader;
-  }
+    public Long incrementWriteOnlyTransactionCount() {
+        return ++writeOnlyTransactionCount;
+    }
 
-  @Override public String getRaftState() {
-    return raftState;
-  }
+    public Long incrementReadWriteTransactionCount() {
+        return ++readWriteTransactionCount;
+    }
 
-  public Long incrementCommittedTransactionCount() {
-    return committedTransactionsCount++;
-  }
+    public void setLeader(String leader) {
+        this.leader = leader;
+    }
 
+    public void setRaftState(String raftState) {
+        this.raftState = raftState;
+    }
 
-  public void updateCommittedTransactionsCount(long currentCount){
-     committedTransactionsCount = currentCount;
+    public void setLastLogTerm(Long lastLogTerm) {
+        this.lastLogTerm = lastLogTerm;
+    }
 
-  }
+    public void setLastLogIndex(Long lastLogIndex) {
+        this.lastLogIndex = lastLogIndex;
+    }
 
-  public void updateJournalMessagesCount(long currentCount){
-    journalMessagesCount  = currentCount;
+    public void setCurrentTerm(Long currentTerm) {
+        this.currentTerm = currentTerm;
+    }
 
-  }
+    public void setCommitIndex(Long commitIndex) {
+        this.commitIndex = commitIndex;
+    }
 
-  public void setLeader(String leader){
-    this.leader = leader;
-  }
+    public void setLastApplied(Long lastApplied) {
+        this.lastApplied = lastApplied;
+    }
 
-  public void setRaftState(String raftState){
-    this.raftState = raftState;
-  }
 
+    public void setLastCommittedTransactionTime(
+        Date lastCommittedTransactionTime) {
+        this.lastCommittedTransactionTime = lastCommittedTransactionTime;
+    }
 
-  @Override
-  protected String getMBeanName() {
-    return  shardName;
-  }
+    @Override
+    protected String getMBeanName() {
+        return shardName;
+    }
 
-  @Override
-  protected String getMBeanType() {
-    return JMX_TYPE_DISTRIBUTED_DATASTORE;
-  }
+    @Override
+    protected String getMBeanType() {
+        return JMX_TYPE_DISTRIBUTED_DATASTORE;
+    }
 
-  @Override
-  protected String getMBeanCategory() {
-    return JMX_CATEGORY_SHARD;
-  }
+    @Override
+    protected String getMBeanCategory() {
+        return JMX_CATEGORY_SHARD;
+    }
 
 
+    public void incrementFailedTransactionsCount() {
+        this.failedTransactionsCount++;
+    }
 }
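A minimal sketch of how the bean is expected to be populated (the shard name is a placeholder; registerMBean() is inherited from AbstractBaseMBean and is invoked by ShardMBeanFactory). Note that the per-instance SimpleDateFormat is not thread-safe, so updates are assumed to come from a single actor thread:

    // Illustrative only: the shard name is a placeholder.
    ShardStats stats = ShardMBeanFactory.getShardStatsMBean("member-1-shard-inventory-config");
    stats.incrementCommittedTransactionCount();
    stats.setLastCommittedTransactionTime(new java.util.Date());
    stats.setRaftState("Leader");
    stats.setLeader("member-1-shard-inventory-config");
    // getLastCommittedTransactionTime() reports the timestamp formatted as
    // "yyyy-MM-dd HH:mm:ss.SSS".
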
index 9ebcc7fa5a966c75b3c46b4271af9864f5bf5b90..b8b220ee82fae3851e1f8bcc70a04f74b1c31560 100644 (file)
@@ -4,9 +4,32 @@ package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shard;
  * @author: syedbahm
  */
 public interface ShardStatsMBean {
-   String getShardName();
-   Long getCommittedTransactionsCount();
-   Long getJournalMessagesCount();
-   String getLeader();
-   String getRaftState();
+    String getShardName();
+
+    Long getCommittedTransactionsCount();
+
+    String getLeader();
+
+    String getRaftState();
+
+    Long getReadOnlyTransactionCount();
+
+    Long getWriteOnlyTransactionCount();
+
+    Long getReadWriteTransactionCount();
+
+    Long getLastLogIndex();
+
+    Long getLastLogTerm();
+
+    Long getCurrentTerm();
+
+    Long getCommitIndex();
+
+    Long getLastApplied();
+
+    String getLastCommittedTransactionTime();
+
+    Long getFailedTransactionsCount();
+
 }
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shardmanager/ShardManagerInfo.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shardmanager/ShardManagerInfo.java
new file mode 100644 (file)
index 0000000..0c609b4
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager;
+
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.AbstractBaseMBean;
+
+import java.util.List;
+
+public class ShardManagerInfo extends AbstractBaseMBean implements
+    ShardManagerInfoMBean {
+
+    private final String name;
+    private final List<String> localShards;
+
+    public ShardManagerInfo(String name, List<String> localShards) {
+        this.name = name;
+        this.localShards = localShards;
+    }
+
+
+    @Override protected String getMBeanName() {
+        return name;
+    }
+
+    @Override protected String getMBeanType() {
+        return JMX_TYPE_DISTRIBUTED_DATASTORE;
+    }
+
+    @Override protected String getMBeanCategory() {
+        return JMX_CATEGORY_SHARD_MANAGER;
+    }
+
+    public static ShardManagerInfo createShardManagerMBean(String name, List<String> localShards){
+        ShardManagerInfo shardManagerInfo = new ShardManagerInfo(name,
+            localShards);
+
+        shardManagerInfo.registerMBean();
+
+        return shardManagerInfo;
+    }
+
+    @Override public List<String> getLocalShards() {
+        return localShards;
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shardmanager/ShardManagerInfoMBean.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/shardmanager/ShardManagerInfoMBean.java
new file mode 100644 (file)
index 0000000..28ccc4f
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans.shardmanager;
+
+import java.util.List;
+
+public interface ShardManagerInfoMBean {
+    List<String> getLocalShards();
+}
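A sketch of how the shard manager is expected to expose this MBean; the name follows ShardManagerIdentifier.toString(), and the shard list below is a placeholder:

    // Illustrative only.
    String name = ShardManagerIdentifier.builder().type("config").build().toString();  // "shardmanager-config"
    ShardManagerInfo info = ShardManagerInfo.createShardManagerMBean(
        name, java.util.Arrays.asList("member-1-shard-inventory-config"));
    info.getLocalShards();  // the shards hosted by this member
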
index 4515bd70427a6eca892bb61115817941684d0a1c..c639064036e82ff80ea5556f769af60bcecbec51 100644 (file)
@@ -11,7 +11,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
 
 public class AbortTransaction implements SerializableMessage {
-  public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.AbortTransaction.class;
+  public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.AbortTransaction.class;
 
   @Override
   public Object toSerializable() {
index 31a06fe4c51f66329dd2b85dc49bad38afeb7b25..88e26401f700a449094960aaa784f984e9a27253 100644 (file)
@@ -11,7 +11,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
 
 public class AbortTransactionReply implements SerializableMessage {
-  public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.AbortTransactionReply.class;
+  public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.AbortTransactionReply.class;
 
 
   @Override
index 2c032aff65ea569567b09595f76b9b52a1108124..08f81c121f1d115b991005e945f2f03c4a8c1892 100644 (file)
@@ -11,7 +11,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
 
 public class CanCommitTransaction implements SerializableMessage {
-  public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CanCommitTransaction.class;
+  public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CanCommitTransaction.class;
 
   @Override
   public Object toSerializable() {
index 57237bcbe219d0a93ba0de4d25001683ed425bed..a54ee6209c8d75b36681cfec470f44a7bcbae593 100644 (file)
@@ -11,7 +11,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import org.opendaylight.controller.protobuff.messages.registration.ListenerRegistrationMessages;
 
 public class CloseDataChangeListenerRegistration implements SerializableMessage {
-  public static Class SERIALIZABLE_CLASS = ListenerRegistrationMessages.CloseDataChangeListenerRegistration.class;
+  public static final Class SERIALIZABLE_CLASS = ListenerRegistrationMessages.CloseDataChangeListenerRegistration.class;
   @Override
   public Object toSerializable() {
     return ListenerRegistrationMessages.CloseDataChangeListenerRegistration.newBuilder().build();
index 14187139aafefd1758dd0dff444882b1ae4c54c9..92138a769c1b4b0d3b3a7f4749313cc4c458002a 100644 (file)
@@ -11,7 +11,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommitCohortMessages;
 
 public class CommitTransaction implements SerializableMessage {
-  public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CommitTransaction.class;
+  public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CommitTransaction.class;
 
   @Override
   public Object toSerializable() {
index afeba298797ea2aead2879ec98d4100bde0996cd..5751b71037ba84e97a68834c8e28a1399e7c1d36 100644 (file)
@@ -12,7 +12,7 @@ import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommit
 
 public class CommitTransactionReply implements SerializableMessage {
 
-  public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CommitTransactionReply.class;
+  public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.CommitTransactionReply.class;
 
   @Override
   public Object toSerializable() {
index b27ad86be987097b9bb9c37713a50116bfa5116e..d5c9e21611af20df37bb1999d00ead9a44495fda 100644 (file)
@@ -13,7 +13,7 @@ import org.opendaylight.controller.protobuff.messages.transaction.ShardTransacti
 
 
 public class CreateTransaction implements SerializableMessage {
-  public static Class SERIALIZABLE_CLASS = ShardTransactionMessages.CreateTransaction.class;
+  public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.CreateTransaction.class;
   private final String transactionId;
   private final int transactionType;
 
index 6339749f7bb298f27238493dfa999035f48d70ad..8dd04e540e881baf2ac1aefa60671b8803e96280 100644 (file)
@@ -11,7 +11,7 @@ package org.opendaylight.controller.cluster.datastore.messages;
 import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionChainMessages;
 
 public class CreateTransactionChain implements SerializableMessage{
-  public static Class SERIALIZABLE_CLASS = ShardTransactionChainMessages.CreateTransactionChain.class;
+  public static final Class SERIALIZABLE_CLASS = ShardTransactionChainMessages.CreateTransactionChain.class;
 
   @Override
   public Object toSerializable() {
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExists.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExists.java
new file mode 100644 (file)
index 0000000..d52daab
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import org.opendaylight.controller.cluster.datastore.util.InstanceIdentifierUtils;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+public class DataExists implements SerializableMessage{
+
+    public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.DataExists.class;
+
+    private final YangInstanceIdentifier path;
+
+    public DataExists(YangInstanceIdentifier path) {
+        this.path = path;
+    }
+
+    public YangInstanceIdentifier getPath() {
+        return path;
+    }
+
+    @Override public Object toSerializable() {
+        return ShardTransactionMessages.DataExists.newBuilder()
+            .setInstanceIdentifierPathArguments(
+                InstanceIdentifierUtils.toSerializable(path)).build();
+    }
+
+    public static DataExists fromSerializable(Object serializable){
+        ShardTransactionMessages.DataExists o = (ShardTransactionMessages.DataExists) serializable;
+        return new DataExists(InstanceIdentifierUtils.fromSerializable(o.getInstanceIdentifierPathArguments()));
+    }
+
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExistsReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataExistsReply.java
new file mode 100644 (file)
index 0000000..04fafa1
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+
+public class DataExistsReply implements SerializableMessage{
+
+
+    public static final Class SERIALIZABLE_CLASS = ShardTransactionMessages.DataExistsReply.class;
+
+    private final boolean exists;
+
+    public DataExistsReply(boolean exists) {
+        this.exists = exists;
+    }
+
+    public boolean exists() {
+        return exists;
+    }
+
+    @Override public Object toSerializable() {
+        return ShardTransactionMessages.DataExistsReply.newBuilder()
+            .setExists(exists).build();
+    }
+
+    public static DataExistsReply fromSerializable(Object serializable){
+        ShardTransactionMessages.DataExistsReply o = (ShardTransactionMessages.DataExistsReply) serializable;
+        return new DataExistsReply(o.getExists());
+    }
+
+}
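Both DataExists and DataExistsReply follow the same toSerializable()/fromSerializable() convention as the other protobuf-backed messages; a minimal round trip for the reply:

    // Illustrative only.
    Object onWire = new DataExistsReply(true).toSerializable();
    boolean exists = DataExistsReply.fromSerializable(onWire).exists();  // true
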
index 8c2543e48606b20059eb7d42ffdc1081157e1b9c..346519ed5aeea3f7b82a60d263052f0a06a60c8e 100644 (file)
@@ -8,16 +8,18 @@
 
 package org.opendaylight.controller.cluster.datastore.messages;
 
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+
 public class PeerAddressResolved {
-    private final String peerId;
+    private final ShardIdentifier peerId;
     private final String peerAddress;
 
-    public PeerAddressResolved(String peerId, String peerAddress) {
+    public PeerAddressResolved(ShardIdentifier peerId, String peerAddress) {
         this.peerId = peerId;
         this.peerAddress = peerAddress;
     }
 
-    public String getPeerId() {
+    public ShardIdentifier getPeerId() {
         return peerId;
     }
 
index 1e5a05329b72c38b72637a6ceaeee3002741b64e..dae4cec3c3a606c059771e9335317d1d75c233d7 100644 (file)
@@ -12,7 +12,7 @@ import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommit
 
 public class PreCommitTransaction implements SerializableMessage{
 
-  public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.PreCommitTransaction.class;
+  public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.PreCommitTransaction.class;
 
   @Override
   public Object toSerializable() {
index 1aedae3ae72fb51b0794e12ee338c0ca478c97f4..fc07bfcb4b786793f02125bfad01f77088bcfe33 100644 (file)
@@ -12,7 +12,7 @@ import org.opendaylight.controller.protobuff.messages.cohort3pc.ThreePhaseCommit
 
 public class PreCommitTransactionReply implements SerializableMessage{
 
-  public static Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.PreCommitTransactionReply.class;
+  public static final Class SERIALIZABLE_CLASS = ThreePhaseCommitCohortMessages.PreCommitTransactionReply.class;
 
   @Override
   public Object toSerializable() {
index 20268a67449d2ff1bcddeef240b91ef4f287a774..c154b81e3567d19e6be40db61dbe41690ebca78e 100644 (file)
@@ -18,19 +18,21 @@ public class InstanceIdentifierUtils {
         .getLogger(InstanceIdentifierUtils.class);
 
     public static String getParentPath(String currentElementPath) {
-        String parentPath = "";
+
+        StringBuilder parentPath = new StringBuilder();
 
         if (currentElementPath != null) {
             String[] parentPaths = currentElementPath.split("/");
             if (parentPaths.length > 2) {
                 for (int i = 0; i < parentPaths.length - 1; i++) {
                     if (parentPaths[i].length() > 0) {
-                        parentPath += "/" + parentPaths[i];
+                        parentPath.append("/");
+                        parentPath.append(parentPaths[i]);
                     }
                 }
             }
         }
-        return parentPath;
+        return parentPath.toString();
     }
 
     @Deprecated
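The StringBuilder rewrite keeps getParentPath's behaviour; for reference:

    // Illustrative only.
    InstanceIdentifierUtils.getParentPath("/node1/node2/node3");  // returns "/node1/node2"
    InstanceIdentifierUtils.getParentPath("/node1");              // returns "" (no parent)
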
index daac89c4c8adaef39047034138a8318e639f54c8..8af9bd07d793cfb0eb0dae33414b6631d109e4a9 100644 (file)
@@ -1,6 +1,7 @@
 
 odl-cluster-data {
   akka {
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
     cluster {
         roles = [
           "member-1"
@@ -23,7 +24,7 @@ odl-cluster-data {
       netty.tcp {
         hostname = "127.0.0.1"
         port = 2550
-           maximum-frame-size = 2097152
+           maximum-frame-size = 419430400
            send-buffer-size = 52428800
            receive-buffer-size = 52428800
       }
@@ -39,6 +40,7 @@ odl-cluster-data {
 
 odl-cluster-rpc {
   akka {
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
     actor {
       provider = "akka.cluster.ClusterActorRefProvider"
 
index 6599bd8eeb0d0b0b54d9058086d836fb82aa214a..319451f8f00587d7328f5e4c20e4fcc1454051d2 100644 (file)
@@ -14,8 +14,8 @@ import akka.actor.ActorSelection;
 import akka.actor.Props;
 import akka.event.Logging;
 import akka.testkit.JavaTestKit;
-import junit.framework.Assert;
 import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChain;
@@ -37,6 +37,8 @@ import scala.concurrent.duration.FiniteDuration;
 import java.util.Collections;
 
 import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
 
 public class BasicIntegrationTest extends AbstractActorTest {
 
@@ -52,7 +54,11 @@ public class BasicIntegrationTest extends AbstractActorTest {
 
 
         new JavaTestKit(getSystem()) {{
-            final Props props = Shard.props("config", Collections.EMPTY_MAP);
+            final ShardIdentifier identifier =
+                ShardIdentifier.builder().memberName("member-1")
+                    .shardName("inventory").type("config").build();
+
+            final Props props = Shard.props(identifier, Collections.EMPTY_MAP);
             final ActorRef shard = getSystem().actorOf(props);
 
             new Within(duration("5 seconds")) {
@@ -95,7 +101,7 @@ public class BasicIntegrationTest extends AbstractActorTest {
                             }
                         }.get(); // this extracts the received message
 
-                    Assert.assertNotNull(transactionChain);
+                    assertNotNull(transactionChain);
 
                     System.out.println("Successfully created transaction chain");
 
@@ -116,7 +122,7 @@ public class BasicIntegrationTest extends AbstractActorTest {
                             }
                         }.get(); // this extracts the received message
 
-                    Assert.assertNotNull(transaction);
+                    assertNotNull(transaction);
 
                     System.out.println("Successfully created transaction");
 
@@ -135,7 +141,7 @@ public class BasicIntegrationTest extends AbstractActorTest {
                         }
                     }.get(); // this extracts the received message
 
-                    Assert.assertTrue(writeDone);
+                    assertTrue(writeDone);
 
                     System.out.println("Successfully wrote data");
 
@@ -158,7 +164,7 @@ public class BasicIntegrationTest extends AbstractActorTest {
                             }
                         }.get(); // this extracts the received message
 
-                    Assert.assertNotNull(cohort);
+                    assertNotNull(cohort);
 
                     System.out.println("Successfully readied the transaction");
 
@@ -177,7 +183,7 @@ public class BasicIntegrationTest extends AbstractActorTest {
                             }
                         }.get(); // this extracts the received message
 
-                    Assert.assertTrue(preCommitDone);
+                    assertTrue(preCommitDone);
 
                     System.out.println("Successfully pre-committed the transaction");
 
index 56fd3c568a5e2489c59a7172cfc9ad5b78b40713..17329611b00d6b010302eaa9d9ecf503972eb7e0 100644 (file)
@@ -8,6 +8,8 @@ import org.junit.Test;
 import java.io.File;
 import java.util.List;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 public class ConfigurationImplTest {
@@ -31,6 +33,49 @@ public class ConfigurationImplTest {
 
         assertTrue(memberShardNames.contains("people-1"));
         assertTrue(memberShardNames.contains("cars-1"));
+
+        // Retrieve once again to hit cache
+
+        memberShardNames =
+            configuration.getMemberShardNames("member-1");
+
+        assertTrue(memberShardNames.contains("people-1"));
+        assertTrue(memberShardNames.contains("cars-1"));
+
+    }
+
+    @Test
+    public void testGetMembersFromShardName(){
+        List<String> members =
+            configuration.getMembersFromShardName("default");
+
+        assertEquals(3, members.size());
+
+        assertTrue(members.contains("member-1"));
+        assertTrue(members.contains("member-2"));
+        assertTrue(members.contains("member-3"));
+
+        assertFalse(members.contains("member-26"));
+
+        // Retrieve once again to hit cache
+        members =
+            configuration.getMembersFromShardName("default");
+
+        assertEquals(3, members.size());
+
+        assertTrue(members.contains("member-1"));
+        assertTrue(members.contains("member-2"));
+        assertTrue(members.contains("member-3"));
+
+        assertFalse(members.contains("member-26"));
+
+
+        // Try to find a shard which is not present
+
+        members =
+            configuration.getMembersFromShardName("foobar");
+
+        assertEquals(0, members.size());
     }
 
     @Test
index 920248521a297871f95c5312f1d34085900feced..eb2c24292aee6663a669e9b42ba29a82d966fbd6 100644 (file)
@@ -21,7 +21,8 @@ import static org.junit.Assert.assertEquals;
 public class DataChangeListenerRegistrationTest extends AbstractActorTest {
   private static ListeningExecutorService storeExecutor = MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());
 
-  private static final InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", storeExecutor);
+  private static final InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", storeExecutor,
+          MoreExecutors.sameThreadExecutor());
 
   static {
     store.onGlobalContextUpdated(TestModel.createTestContext());
@@ -37,12 +38,14 @@ public class DataChangeListenerRegistrationTest extends AbstractActorTest {
       final ActorRef subject = getSystem().actorOf(props, "testCloseListenerRegistration");
 
       new Within(duration("1 seconds")) {
+        @Override
         protected void run() {
 
           subject.tell(new CloseDataChangeListenerRegistration().toSerializable(), getRef());
 
           final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
             // do not put code outside this method, will run afterwards
+            @Override
             protected String match(Object in) {
               if (in.getClass().equals(CloseDataChangeListenerRegistrationReply.SERIALIZABLE_CLASS)) {
                 return "match";
index d1beab904984262cd1f83de56a039c5970dc0742..406f0ffd9e6383953554d400d31e3b19496bc7c2 100644 (file)
@@ -1,8 +1,11 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
 import akka.actor.Props;
-import junit.framework.Assert;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
 import org.opendaylight.controller.cluster.datastore.utils.DoNothingActor;
@@ -21,13 +24,20 @@ import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+
 public class DistributedDataStoreTest extends AbstractActorTest{
 
     private DistributedDataStore distributedDataStore;
     private MockActorContext mockActorContext;
     private ActorRef doNothingActorRef;
 
-    @org.junit.Before
+    @Before
     public void setUp() throws Exception {
         ShardStrategyFactory.setConfiguration(new MockConfiguration());
         final Props props = Props.create(DoNothingActor.class);
@@ -35,7 +45,7 @@ public class DistributedDataStoreTest extends AbstractActorTest{
         doNothingActorRef = getSystem().actorOf(props);
 
         mockActorContext = new MockActorContext(getSystem(), doNothingActorRef);
-        distributedDataStore = new DistributedDataStore(mockActorContext, "config");
+        distributedDataStore = new DistributedDataStore(mockActorContext);
         distributedDataStore.onGlobalContextUpdated(
             TestModel.createTestContext());
 
@@ -48,12 +58,22 @@ public class DistributedDataStoreTest extends AbstractActorTest{
                 .build());
     }
 
-    @org.junit.After
+    @After
     public void tearDown() throws Exception {
 
     }
 
-    @org.junit.Test
+    @Test
+    public void testConstructor(){
+        ActorSystem actorSystem = mock(ActorSystem.class);
+
+        new DistributedDataStore(actorSystem, "config",
+            mock(ClusterWrapper.class), mock(Configuration.class));
+
+        verify(actorSystem).actorOf(any(Props.class), eq("shardmanager-config"));
+    }
+
+    @Test
     public void testRegisterChangeListenerWhenShardIsNotLocal() throws Exception {
 
         ListenerRegistration registration =
@@ -65,12 +85,12 @@ public class DistributedDataStoreTest extends AbstractActorTest{
         }, AsyncDataBroker.DataChangeScope.BASE);
 
         // Since we do not expect the shard to be local registration will return a NoOpRegistration
-        Assert.assertTrue(registration instanceof NoOpDataChangeListenerRegistration);
+        assertTrue(registration instanceof NoOpDataChangeListenerRegistration);
 
-        Assert.assertNotNull(registration);
+        assertNotNull(registration);
     }
 
-    @org.junit.Test
+    @Test
     public void testRegisterChangeListenerWhenShardIsLocal() throws Exception {
 
         mockActorContext.setExecuteLocalShardOperationResponse(new RegisterChangeListenerReply(doNothingActorRef.path()));
@@ -83,33 +103,33 @@ public class DistributedDataStoreTest extends AbstractActorTest{
                 }
             }, AsyncDataBroker.DataChangeScope.BASE);
 
-        Assert.assertTrue(registration instanceof DataChangeListenerRegistrationProxy);
+        assertTrue(registration instanceof DataChangeListenerRegistrationProxy);
 
-        Assert.assertNotNull(registration);
+        assertNotNull(registration);
     }
 
 
-    @org.junit.Test
+    @Test
     public void testCreateTransactionChain() throws Exception {
         final DOMStoreTransactionChain transactionChain = distributedDataStore.createTransactionChain();
-        Assert.assertNotNull(transactionChain);
+        assertNotNull(transactionChain);
     }
 
-    @org.junit.Test
+    @Test
     public void testNewReadOnlyTransaction() throws Exception {
         final DOMStoreReadTransaction transaction = distributedDataStore.newReadOnlyTransaction();
-        Assert.assertNotNull(transaction);
+        assertNotNull(transaction);
     }
 
-    @org.junit.Test
+    @Test
     public void testNewWriteOnlyTransaction() throws Exception {
         final DOMStoreWriteTransaction transaction = distributedDataStore.newWriteOnlyTransaction();
-        Assert.assertNotNull(transaction);
+        assertNotNull(transaction);
     }
 
-    @org.junit.Test
+    @Test
     public void testNewReadWriteTransaction() throws Exception {
         final DOMStoreReadWriteTransaction transaction = distributedDataStore.newReadWriteTransaction();
-        Assert.assertNotNull(transaction);
+        assertNotNull(transaction);
     }
 }
index 431a266b148478a49766bd8f0cc173bc7b2e4062..0d86ffb8444bd7c4ff72bd27b079ab17427db066 100644 (file)
@@ -6,6 +6,7 @@ import akka.event.Logging;
 import akka.testkit.JavaTestKit;
 import junit.framework.Assert;
 import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChain;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionChainReply;
@@ -35,7 +36,11 @@ public class ShardTest extends AbstractActorTest {
     @Test
     public void testOnReceiveCreateTransactionChain() throws Exception {
         new JavaTestKit(getSystem()) {{
-            final Props props = Shard.props("config", Collections.EMPTY_MAP);
+            final ShardIdentifier identifier =
+                ShardIdentifier.builder().memberName("member-1")
+                    .shardName("inventory").type("config").build();
+
+            final Props props = Shard.props(identifier, Collections.EMPTY_MAP);
             final ActorRef subject =
                 getSystem().actorOf(props, "testCreateTransactionChain");
 
@@ -87,7 +92,11 @@ public class ShardTest extends AbstractActorTest {
     @Test
     public void testOnReceiveRegisterListener() throws Exception {
         new JavaTestKit(getSystem()) {{
-            final Props props = Shard.props("config", Collections.EMPTY_MAP);
+            final ShardIdentifier identifier =
+                ShardIdentifier.builder().memberName("member-1")
+                    .shardName("inventory").type("config").build();
+
+            final Props props = Shard.props(identifier, Collections.EMPTY_MAP);
             final ActorRef subject =
                 getSystem().actorOf(props, "testRegisterChangeListener");
 
@@ -141,7 +150,11 @@ public class ShardTest extends AbstractActorTest {
     @Test
     public void testCreateTransaction(){
         new JavaTestKit(getSystem()) {{
-            final Props props = Shard.props("config", Collections.EMPTY_MAP);
+            final ShardIdentifier identifier =
+                ShardIdentifier.builder().memberName("member-1")
+                    .shardName("inventory").type("config").build();
+
+            final Props props = Shard.props(identifier, Collections.EMPTY_MAP);
             final ActorRef subject =
                 getSystem().actorOf(props, "testCreateTransaction");
 
@@ -196,9 +209,14 @@ public class ShardTest extends AbstractActorTest {
     @Test
     public void testPeerAddressResolved(){
         new JavaTestKit(getSystem()) {{
-            Map<String, String> peerAddresses = new HashMap<>();
-            peerAddresses.put("member-2", null);
-            final Props props = Shard.props("config", peerAddresses);
+            Map<ShardIdentifier, String> peerAddresses = new HashMap<>();
+
+            final ShardIdentifier identifier =
+                ShardIdentifier.builder().memberName("member-1")
+                    .shardName("inventory").type("config").build();
+
+            peerAddresses.put(identifier, null);
+            final Props props = Shard.props(identifier, peerAddresses);
             final ActorRef subject =
                 getSystem().actorOf(props, "testPeerAddressResolved");
 
@@ -206,7 +224,7 @@ public class ShardTest extends AbstractActorTest {
                 protected void run() {
 
                     subject.tell(
-                        new PeerAddressResolved("member-2", "akka://foobar"),
+                        new PeerAddressResolved(identifier, "akka://foobar"),
                         getRef());
 
                     expectNoMsg();
index b35880a6a501367a4c1155b3cae4ef405352ddb6..d468af6664981d08ad603b1a841fefbdaccc8d47 100644 (file)
@@ -19,7 +19,8 @@ public class ShardTransactionChainTest extends AbstractActorTest {
 
   private static ListeningExecutorService storeExecutor = MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());
 
-  private static final InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", storeExecutor);
+  private static final InMemoryDOMDataStore store = new InMemoryDOMDataStore("OPER", storeExecutor,
+          MoreExecutors.sameThreadExecutor());
 
   static {
     store.onGlobalContextUpdated(TestModel.createTestContext());
@@ -31,12 +32,14 @@ public class ShardTransactionChainTest extends AbstractActorTest {
       final ActorRef subject = getSystem().actorOf(props, "testCreateTransaction");
 
      new Within(duration("1 seconds")) {
+        @Override
         protected void run() {
 
           subject.tell(new CreateTransaction("txn-1", TransactionProxy.TransactionType.READ_ONLY.ordinal() ).toSerializable(), getRef());
 
           final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
             // do not put code outside this method, will run afterwards
+            @Override
             protected String match(Object in) {
               if (in.getClass().equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
                 return CreateTransactionReply.fromSerializable(in).getTransactionPath();
@@ -66,12 +69,14 @@ public class ShardTransactionChainTest extends AbstractActorTest {
       final ActorRef subject = getSystem().actorOf(props, "testCloseTransactionChain");
 
       new Within(duration("1 seconds")) {
+        @Override
         protected void run() {
 
           subject.tell(new CloseTransactionChain().toSerializable(), getRef());
 
           final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
             // do not put code outside this method, will run afterwards
+            @Override
             protected String match(Object in) {
               if (in.getClass().equals(CloseTransactionChainReply.SERIALIZABLE_CLASS)) {
                 return "match";
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionFailureTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTransactionFailureTest.java
new file mode 100644 (file)
index 0000000..02ceee8
--- /dev/null
@@ -0,0 +1,313 @@
+/*
+ *
+ *  Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ *  This program and the accompanying materials are made available under the
+ *  terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ *  and is available at http://www.eclipse.org/legal/epl-v10.html
+ *
+ */
+
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.Props;
+import akka.testkit.TestActorRef;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
+import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.protobuff.messages.common.NormalizedNodeMessages;
+import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.Duration;
+
+import java.util.Collections;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Covers negative test cases
+ * @author Basheeruddin Ahmed <syedbahm@cisco.com>
+ */
+public class ShardTransactionFailureTest extends AbstractActorTest {
+    private static ListeningExecutorService storeExecutor =
+        MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());
+
+    private static final InMemoryDOMDataStore store =
+        new InMemoryDOMDataStore("OPER", storeExecutor,
+            MoreExecutors.sameThreadExecutor());
+
+    private static final SchemaContext testSchemaContext =
+        TestModel.createTestContext();
+
+    private static final ShardIdentifier SHARD_IDENTIFIER =
+        ShardIdentifier.builder().memberName("member-1")
+            .shardName("inventory").type("config").build();
+
+    static {
+        store.onGlobalContextUpdated(testSchemaContext);
+    }
+
+
+    @Test(expected = ReadFailedException.class)
+    public void testNegativeReadWithReadOnlyTransactionClosed()
+        throws Throwable {
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newReadOnlyTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props,
+                "testNegativeReadWithReadOnlyTransactionClosed");
+
+        ShardTransactionMessages.ReadData readData =
+            ShardTransactionMessages.ReadData.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder()
+                        .build()
+                ).build();
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, readData, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        ((ShardReadTransaction) subject.underlyingActor())
+            .forUnitTestOnlyExplicitTransactionClose();
+
+        future = akka.pattern.Patterns.ask(subject, readData, 3000);
+        Await.result(future, Duration.Zero());
+
+
+    }
+
+
+    @Test(expected = ReadFailedException.class)
+    public void testNegativeReadWithReadWriteOnlyTransactionClosed()
+        throws Throwable {
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newReadWriteTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props,
+                "testNegativeReadWithReadWriteOnlyTransactionClosed");
+
+        ShardTransactionMessages.ReadData readData =
+            ShardTransactionMessages.ReadData.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder()
+                        .build()
+                ).build();
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, readData, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        ((ShardReadWriteTransaction) subject.underlyingActor())
+            .forUnitTestOnlyExplicitTransactionClose();
+
+        future = akka.pattern.Patterns.ask(subject, readData, 3000);
+        Await.result(future, Duration.Zero());
+
+
+    }
+
+    @Test(expected = ReadFailedException.class)
+    public void testNegativeExistsWithReadWriteOnlyTransactionClosed()
+        throws Throwable {
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newReadWriteTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props,
+                "testNegativeExistsWithReadWriteOnlyTransactionClosed");
+
+        ShardTransactionMessages.DataExists dataExists =
+            ShardTransactionMessages.DataExists.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder()
+                        .build()
+                ).build();
+
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, dataExists, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        ((ShardReadWriteTransaction) subject.underlyingActor())
+            .forUnitTestOnlyExplicitTransactionClose();
+
+        future = akka.pattern.Patterns.ask(subject, dataExists, 3000);
+        Await.result(future, Duration.Zero());
+
+
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void testNegativeWriteWithTransactionReady() throws Exception {
+
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newWriteOnlyTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props,
+                "testNegativeWriteWithTransactionReady");
+
+        ShardTransactionMessages.ReadyTransaction readyTransaction =
+            ShardTransactionMessages.ReadyTransaction.newBuilder().build();
+
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, readyTransaction, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        ShardTransactionMessages.WriteData writeData =
+            ShardTransactionMessages.WriteData.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder()
+                        .build()).setNormalizedNode(
+                NormalizedNodeMessages.Node.newBuilder().build()
+
+            ).build();
+
+        future = akka.pattern.Patterns.ask(subject, writeData, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+
+    }
+
+
+    @Test(expected = IllegalStateException.class)
+    public void testNegativeReadWriteWithTransactionReady() throws Exception {
+
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newReadWriteTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props,
+                "testNegativeReadWriteWithTransactionReady");
+
+        ShardTransactionMessages.ReadyTransaction readyTransaction =
+            ShardTransactionMessages.ReadyTransaction.newBuilder().build();
+
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, readyTransaction, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        ShardTransactionMessages.WriteData writeData =
+            ShardTransactionMessages.WriteData.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder()
+                        .build()).setNormalizedNode(
+                NormalizedNodeMessages.Node.newBuilder().build()
+
+            ).build();
+
+        future = akka.pattern.Patterns.ask(subject, writeData, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+
+    }
+
+    @Test(expected = IllegalStateException.class)
+    public void testNegativeMergeTransactionReady() throws Exception {
+
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newReadWriteTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props, "testNegativeMergeTransactionReady");
+
+        ShardTransactionMessages.ReadyTransaction readyTransaction =
+            ShardTransactionMessages.ReadyTransaction.newBuilder().build();
+
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, readyTransaction, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        ShardTransactionMessages.MergeData mergeData =
+            ShardTransactionMessages.MergeData.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder().build())
+                .setNormalizedNode(NormalizedNodeMessages.Node.newBuilder().build())
+                .build();
+
+        future = akka.pattern.Patterns.ask(subject, mergeData, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+    }
+
+
+    @Test(expected = IllegalStateException.class)
+    public void testNegativeDeleteDataWhenTransactionReady() throws Exception {
+
+
+        final ActorRef shard =
+            getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+        final Props props =
+            ShardTransaction.props(store.newReadWriteTransaction(), shard,
+                TestModel.createTestContext());
+
+        final TestActorRef<ShardTransaction> subject = TestActorRef
+            .create(getSystem(), props,
+                "testNegativeDeleteDataWhenTransactionReady");
+
+        ShardTransactionMessages.ReadyTransaction readyTransaction =
+            ShardTransactionMessages.ReadyTransaction.newBuilder().build();
+
+        Future<Object> future =
+            akka.pattern.Patterns.ask(subject, readyTransaction, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+        ShardTransactionMessages.DeleteData deleteData =
+            ShardTransactionMessages.DeleteData.newBuilder()
+                .setInstanceIdentifierPathArguments(
+                    NormalizedNodeMessages.InstanceIdentifier.newBuilder()
+                        .build()).build();
+
+        future = akka.pattern.Patterns.ask(subject, deleteData, 3000);
+        assertTrue(future.isCompleted());
+        Await.result(future, Duration.Zero());
+
+    }
+
+
+}
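For context, the negative tests above all rely on the same pattern: a synchronous TestActorRef dispatches the message on the calling thread, so any exception raised inside the actor is captured in the Future returned by ask, and Await.result rethrows it for @Test(expected = IllegalStateException.class) to observe. A minimal sketch of that pattern, with subject and message taken as given from the tests and the timeout purely illustrative:

    // Sketch only: ask a TestActorRef-backed transaction actor and rethrow
    // whatever failure the actor recorded in the reply future.
    Future<Object> reply = akka.pattern.Patterns.ask(subject, message, 3000);
    assertTrue(reply.isCompleted());       // TestActorRef ran the handler on this thread already
    Await.result(reply, Duration.Zero());  // rethrows e.g. the expected IllegalStateException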
index 632ecc29cd31b727f714be859154a53182ade178..78895b2366a96aa7e2c85aaa97f2b7a6218a92a2 100644 (file)
@@ -9,8 +9,12 @@ import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
 import org.junit.Assert;
 import org.junit.Test;
+import org.opendaylight.controller.cluster.datastore.exceptions.UnknownMessageException;
+import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransactionReply;
+import org.opendaylight.controller.cluster.datastore.messages.DataExists;
+import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
 import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
 import org.opendaylight.controller.cluster.datastore.messages.DeleteDataReply;
 import org.opendaylight.controller.cluster.datastore.messages.MergeData;
@@ -42,10 +46,15 @@ public class ShardTransactionTest extends AbstractActorTest {
         MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());
 
     private static final InMemoryDOMDataStore store =
-        new InMemoryDOMDataStore("OPER", storeExecutor);
+        new InMemoryDOMDataStore("OPER", storeExecutor, MoreExecutors.sameThreadExecutor());
 
     private static final SchemaContext testSchemaContext = TestModel.createTestContext();
 
+    private static final ShardIdentifier SHARD_IDENTIFIER =
+        ShardIdentifier.builder().memberName("member-1")
+            .shardName("inventory").type("config").build();
+
+
     static {
         store.onGlobalContextUpdated(testSchemaContext);
     }
@@ -53,12 +62,13 @@ public class ShardTransactionTest extends AbstractActorTest {
     @Test
     public void testOnReceiveReadData() throws Exception {
         new JavaTestKit(getSystem()) {{
-            final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+            final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
             final Props props =
                 ShardTransaction.props(store.newReadOnlyTransaction(), shard, testSchemaContext);
             final ActorRef subject = getSystem().actorOf(props, "testReadData");
 
             new Within(duration("1 seconds")) {
+                @Override
                 protected void run() {
 
                     subject.tell(
@@ -67,6 +77,7 @@ public class ShardTransactionTest extends AbstractActorTest {
 
                     final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
                         // do not put code outside this method, will run afterwards
+                        @Override
                         protected String match(Object in) {
                             if (in.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
                               if (ReadDataReply.fromSerializable(testSchemaContext,YangInstanceIdentifier.builder().build(), in)
@@ -93,12 +104,13 @@ public class ShardTransactionTest extends AbstractActorTest {
     @Test
     public void testOnReceiveReadDataWhenDataNotFound() throws Exception {
         new JavaTestKit(getSystem()) {{
-            final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+            final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
             final Props props =
                 ShardTransaction.props( store.newReadOnlyTransaction(), shard, testSchemaContext);
             final ActorRef subject = getSystem().actorOf(props, "testReadDataWhenDataNotFound");
 
             new Within(duration("1 seconds")) {
+                @Override
                 protected void run() {
 
                     subject.tell(
@@ -107,6 +119,7 @@ public class ShardTransactionTest extends AbstractActorTest {
 
                     final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
                         // do not put code outside this method, will run afterwards
+                        @Override
                         protected String match(Object in) {
                             if (in.getClass().equals(ReadDataReply.SERIALIZABLE_CLASS)) {
                                 if (ReadDataReply.fromSerializable(testSchemaContext,TestModel.TEST_PATH, in)
@@ -131,10 +144,95 @@ public class ShardTransactionTest extends AbstractActorTest {
         }};
     }
 
+    @Test
+    public void testOnReceiveDataExistsPositive() throws Exception {
+        new JavaTestKit(getSystem()) {{
+            final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+            final Props props =
+                ShardTransaction.props(store.newReadOnlyTransaction(), shard, testSchemaContext);
+            final ActorRef subject = getSystem().actorOf(props, "testDataExistsPositive");
+
+            new Within(duration("1 seconds")) {
+                @Override
+                protected void run() {
+
+                    subject.tell(
+                        new DataExists(YangInstanceIdentifier.builder().build()).toSerializable(),
+                        getRef());
+
+                    final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
+                        // do not put code outside this method, will run afterwards
+                        @Override
+                        protected String match(Object in) {
+                            if (in.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
+                                if (DataExistsReply.fromSerializable(in)
+                                    .exists()) {
+                                    return "match";
+                                }
+                                return null;
+                            } else {
+                                throw noMatch();
+                            }
+                        }
+                    }.get(); // this extracts the received message
+
+                    assertEquals("match", out);
+
+                    expectNoMsg();
+                }
+
+
+            };
+        }};
+    }
+
+    @Test
+    public void testOnReceiveDataExistsNegative() throws Exception {
+        new JavaTestKit(getSystem()) {{
+            final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
+            final Props props =
+                ShardTransaction.props(store.newReadOnlyTransaction(), shard, testSchemaContext);
+            final ActorRef subject = getSystem().actorOf(props, "testDataExistsNegative");
+
+            new Within(duration("1 seconds")) {
+                @Override
+                protected void run() {
+
+                    subject.tell(
+                        new DataExists(TestModel.TEST_PATH).toSerializable(),
+                        getRef());
+
+                    final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
+                        // do not put code outside this method, will run afterwards
+                        @Override
+                        protected String match(Object in) {
+                            if (in.getClass().equals(DataExistsReply.SERIALIZABLE_CLASS)) {
+                                if (!DataExistsReply.fromSerializable(in)
+                                    .exists()) {
+                                    return "match";
+                                }
+                                return null;
+                            } else {
+                                throw noMatch();
+                            }
+                        }
+                    }.get(); // this extracts the received message
+
+                    assertEquals("match", out);
+
+                    expectNoMsg();
+                }
+
+
+            };
+        }};
+    }
+
     private void assertModification(final ActorRef subject,
         final Class<? extends Modification> modificationType) {
         new JavaTestKit(getSystem()) {{
             new Within(duration("1 seconds")) {
+                @Override
                 protected void run() {
                     subject
                         .tell(new ShardTransaction.GetCompositedModification(),
@@ -143,6 +241,7 @@ public class ShardTransactionTest extends AbstractActorTest {
                     final CompositeModification compositeModification =
                         new ExpectMsg<CompositeModification>(duration("1 seconds"), "match hint") {
                             // do not put code outside this method, will run afterwards
+                            @Override
                             protected CompositeModification match(Object in) {
                                 if (in instanceof ShardTransaction.GetCompositeModificationReply) {
                                     return ((ShardTransaction.GetCompositeModificationReply) in)
@@ -167,13 +266,14 @@ public class ShardTransactionTest extends AbstractActorTest {
     @Test
     public void testOnReceiveWriteData() throws Exception {
         new JavaTestKit(getSystem()) {{
-            final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+            final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
             final Props props =
                 ShardTransaction.props(store.newWriteOnlyTransaction(), shard, TestModel.createTestContext());
             final ActorRef subject =
                 getSystem().actorOf(props, "testWriteData");
 
             new Within(duration("1 seconds")) {
+                @Override
                 protected void run() {
 
                     subject.tell(new WriteData(TestModel.TEST_PATH,
@@ -182,6 +282,7 @@ public class ShardTransactionTest extends AbstractActorTest {
 
                     final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
                         // do not put code outside this method, will run afterwards
+                        @Override
                         protected String match(Object in) {
                             if (in.getClass().equals(WriteDataReply.SERIALIZABLE_CLASS)) {
                                 return "match";
@@ -205,13 +306,14 @@ public class ShardTransactionTest extends AbstractActorTest {
     @Test
     public void testOnReceiveMergeData() throws Exception {
         new JavaTestKit(getSystem()) {{
-            final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+            final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
             final Props props =
                 ShardTransaction.props(store.newReadWriteTransaction(), shard, testSchemaContext);
             final ActorRef subject =
                 getSystem().actorOf(props, "testMergeData");
 
             new Within(duration("1 seconds")) {
+                @Override
                 protected void run() {
 
                     subject.tell(new MergeData(TestModel.TEST_PATH,
@@ -220,6 +322,7 @@ public class ShardTransactionTest extends AbstractActorTest {
 
                     final String out = new ExpectMsg<String>(duration("500 milliseconds"), "match hint") {
                         // do not put code outside this method, will run afterwards
+                        @Override
                         protected String match(Object in) {
                             if (in.getClass().equals(MergeDataReply.SERIALIZABLE_CLASS)) {
                                 return "match";
@@ -244,19 +347,21 @@ public class ShardTransactionTest extends AbstractActorTest {
     @Test
     public void testOnReceiveDeleteData() throws Exception {
         new JavaTestKit(getSystem()) {{
-            final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+            final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
             final Props props =
                 ShardTransaction.props( store.newWriteOnlyTransaction(), shard, TestModel.createTestContext());
             final ActorRef subject =
                 getSystem().actorOf(props, "testDeleteData");
 
             new Within(duration("1 seconds")) {
+                @Override
                 protected void run() {
 
                     subject.tell(new DeleteData(TestModel.TEST_PATH).toSerializable(), getRef());
 
                     final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
                         // do not put code outside this method, will run afterwards
+                        @Override
                         protected String match(Object in) {
                             if (in.getClass().equals(DeleteDataReply.SERIALIZABLE_CLASS)) {
                                 return "match";
@@ -281,19 +386,21 @@ public class ShardTransactionTest extends AbstractActorTest {
     @Test
     public void testOnReceiveReadyTransaction() throws Exception {
         new JavaTestKit(getSystem()) {{
-            final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+            final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
             final Props props =
                 ShardTransaction.props( store.newReadWriteTransaction(), shard, TestModel.createTestContext());
             final ActorRef subject =
                 getSystem().actorOf(props, "testReadyTransaction");
 
             new Within(duration("1 seconds")) {
+                @Override
                 protected void run() {
 
                     subject.tell(new ReadyTransaction().toSerializable(), getRef());
 
                     final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
                         // do not put code outside this method, will run afterwards
+                        @Override
                         protected String match(Object in) {
                             if (in.getClass().equals(ReadyTransactionReply.SERIALIZABLE_CLASS)) {
                                 return "match";
@@ -317,7 +424,7 @@ public class ShardTransactionTest extends AbstractActorTest {
     @Test
     public void testOnReceiveCloseTransaction() throws Exception {
         new JavaTestKit(getSystem()) {{
-            final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+            final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
             final Props props =
                 ShardTransaction.props(store.newReadWriteTransaction(), shard, TestModel.createTestContext());
             final ActorRef subject =
@@ -326,12 +433,14 @@ public class ShardTransactionTest extends AbstractActorTest {
             watch(subject);
 
             new Within(duration("2 seconds")) {
+                @Override
                 protected void run() {
 
                     subject.tell(new CloseTransaction().toSerializable(), getRef());
 
                     final String out = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
                         // do not put code outside this method, will run afterwards
+                        @Override
                         protected String match(Object in) {
                             if (in.getClass().equals(CloseTransactionReply.SERIALIZABLE_CLASS)) {
                                 return "match";
@@ -345,6 +454,7 @@ public class ShardTransactionTest extends AbstractActorTest {
 
                     final String termination = new ExpectMsg<String>(duration("1 seconds"), "match hint") {
                         // do not put code outside this method, will run afterwards
+                        @Override
                         protected String match(Object in) {
                             if (in instanceof Terminated) {
                                 return "match";
@@ -369,7 +479,7 @@ public class ShardTransactionTest extends AbstractActorTest {
   public void testNegativePerformingWriteOperationOnReadTransaction() throws Exception {
     try {
 
-        final ActorRef shard = getSystem().actorOf(Shard.props("config", Collections.EMPTY_MAP));
+        final ActorRef shard = getSystem().actorOf(Shard.props(SHARD_IDENTIFIER, Collections.EMPTY_MAP));
         final Props props =
             ShardTransaction.props(store.newReadOnlyTransaction(), shard, TestModel.createTestContext());
          final TestActorRef subject = TestActorRef.apply(props,getSystem());
@@ -379,8 +489,8 @@ public class ShardTransactionTest extends AbstractActorTest {
 
 
     } catch (Exception cs) {
-      assertEquals(cs.getClass().getSimpleName(), Exception.class.getSimpleName());
-      assertTrue(cs.getMessage().startsWith("ShardTransaction:handleRecieve received an unknown message"));
+      assertEquals(UnknownMessageException.class.getSimpleName(), cs.getClass().getSimpleName());
+      assertTrue(cs.getMessage(), cs.getMessage().startsWith("Unknown message received "));
     }
   }
 }
index 0cd029c2ffb2da4c093ede214ca037d5a5a345dc..62052f38ab89b6962dc31622332f85113492c924 100644 (file)
@@ -2,20 +2,19 @@ package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorRef;
 import akka.actor.Props;
-
 import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
-
 import junit.framework.Assert;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.datastore.exceptions.PrimaryNotFoundException;
 import org.opendaylight.controller.cluster.datastore.exceptions.TimeoutException;
 import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
+import org.opendaylight.controller.cluster.datastore.messages.DataExistsReply;
 import org.opendaylight.controller.cluster.datastore.messages.DeleteData;
 import org.opendaylight.controller.cluster.datastore.messages.MergeData;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
@@ -30,6 +29,7 @@ import org.opendaylight.controller.cluster.datastore.utils.MockActorContext;
 import org.opendaylight.controller.cluster.datastore.utils.MockClusterWrapper;
 import org.opendaylight.controller.cluster.datastore.utils.MockConfiguration;
 import org.opendaylight.controller.md.cluster.datastore.model.TestModel;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
 import org.opendaylight.controller.protobuff.messages.transaction.ShardTransactionMessages.CreateTransactionReply;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
@@ -81,6 +81,10 @@ public class TransactionProxyTest extends AbstractActorTest {
                 TransactionProxy.TransactionType.READ_ONLY, transactionExecutor, TestModel.createTestContext());
 
 
+        actorContext.setExecuteRemoteOperationResponse(
+            new ReadDataReply(TestModel.createTestContext(), null)
+                .toSerializable());
+
         ListenableFuture<Optional<NormalizedNode<?, ?>>> read =
             transactionProxy.read(TestModel.TEST_PATH);
 
@@ -99,7 +103,7 @@ public class TransactionProxyTest extends AbstractActorTest {
     }
 
     @Test
-    public void testReadWhenANullIsReturned() throws Exception {
+    public void testExists() throws Exception {
         final Props props = Props.create(DoNothingActor.class);
         final ActorRef actorRef = getSystem().actorOf(props);
 
@@ -108,26 +112,57 @@ public class TransactionProxyTest extends AbstractActorTest {
         actorContext.setExecuteShardOperationResponse(createTransactionReply(actorRef));
         actorContext.setExecuteRemoteOperationResponse("message");
 
+
         TransactionProxy transactionProxy =
             new TransactionProxy(actorContext,
                 TransactionProxy.TransactionType.READ_ONLY, transactionExecutor, TestModel.createTestContext());
 
 
-        ListenableFuture<Optional<NormalizedNode<?, ?>>> read =
-            transactionProxy.read(TestModel.TEST_PATH);
+        actorContext.setExecuteRemoteOperationResponse(new DataExistsReply(false).toSerializable());
 
-        Optional<NormalizedNode<?, ?>> normalizedNodeOptional = read.get();
+        CheckedFuture<Boolean, ReadFailedException> exists =
+            transactionProxy.exists(TestModel.TEST_PATH);
 
-        Assert.assertFalse(normalizedNodeOptional.isPresent());
+        Assert.assertFalse(exists.checkedGet());
 
-        actorContext.setExecuteRemoteOperationResponse(new ReadDataReply(
-           TestModel.createTestContext(), null).toSerializable());
+        actorContext.setExecuteRemoteOperationResponse(new DataExistsReply(true).toSerializable());
 
-        read = transactionProxy.read(TestModel.TEST_PATH);
+        exists = transactionProxy.exists(TestModel.TEST_PATH);
 
-        normalizedNodeOptional = read.get();
+        Assert.assertTrue(exists.checkedGet());
 
-        Assert.assertFalse(normalizedNodeOptional.isPresent());
+        actorContext.setExecuteRemoteOperationResponse("bad message");
+
+        exists = transactionProxy.exists(TestModel.TEST_PATH);
+
+        try {
+            exists.checkedGet();
+            fail("Expected ReadFailedException to be thrown");
+        } catch(ReadFailedException e){
+            // expected: an unrecognized reply must fail the future with ReadFailedException
+        }
+
+    }
+
+    @Test(expected = ReadFailedException.class)
+    public void testReadWhenAnInvalidMessageIsSentInReply() throws Exception {
+        final Props props = Props.create(DoNothingActor.class);
+        final ActorRef actorRef = getSystem().actorOf(props);
+
+        final MockActorContext actorContext = new MockActorContext(this.getSystem());
+        actorContext.setExecuteLocalOperationResponse(createPrimaryFound(actorRef));
+        actorContext.setExecuteShardOperationResponse(createTransactionReply(actorRef));
+        actorContext.setExecuteRemoteOperationResponse("message");
+
+        TransactionProxy transactionProxy =
+            new TransactionProxy(actorContext,
+                TransactionProxy.TransactionType.READ_ONLY, transactionExecutor, TestModel.createTestContext());
+
+        CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>
+            read = transactionProxy.read(TestModel.TEST_PATH);
+
+        read.checkedGet();
     }
 
     @Test
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardIdentifierTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardIdentifierTest.java
new file mode 100644 (file)
index 0000000..afcd045
--- /dev/null
@@ -0,0 +1,18 @@
+package org.opendaylight.controller.cluster.datastore.identifiers;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class ShardIdentifierTest {
+
+    @Test
+    public void testBasic(){
+        ShardIdentifier id = ShardIdentifier.builder().memberName("member-1")
+            .shardName("inventory").type("config").build();
+
+        assertEquals("member-1-shard-inventory-config", id.toString());
+    }
+
+
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardManagerIdentifierTest.java b/opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/identifiers/ShardManagerIdentifierTest.java
new file mode 100644 (file)
index 0000000..44bb4b3
--- /dev/null
@@ -0,0 +1,14 @@
+package org.opendaylight.controller.cluster.datastore.identifiers;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+public class ShardManagerIdentifierTest {
+
+    @Test
+    public void testIdentifier(){
+        assertEquals("shardmanager-operational", ShardManagerIdentifier.builder().type("operational").build().toString());
+    }
+
+}
index f7c467652d329550dc665e53a2dd7b93e9342d12..41adcc55b188296f899b1569e98276906b233c1e 100644 (file)
@@ -8,48 +8,86 @@ import org.opendaylight.controller.cluster.datastore.jmx.mbeans.AbstractBaseMBea
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
+import java.text.SimpleDateFormat;
+import java.util.Date;
 
 public class ShardStatsTest {
-  private MBeanServer mbeanServer;
-  private  ShardStats  shardStats;
-  private ObjectName testMBeanName;
+    private MBeanServer mbeanServer;
+    private ShardStats shardStats;
+    private ObjectName testMBeanName;
 
-  @Before
-  public void setUp() throws Exception {
+    @Before
+    public void setUp() throws Exception {
 
-    shardStats = new ShardStats("shard-1");
-    shardStats.registerMBean();
-    mbeanServer= shardStats.getMBeanServer();
-    String objectName = AbstractBaseMBean.BASE_JMX_PREFIX + "type="+shardStats.getMBeanType()+",Category="+
-        shardStats.getMBeanCategory() + ",name="+
-        shardStats.getMBeanName();
-    testMBeanName = new ObjectName(objectName);
-  }
+        shardStats = new ShardStats("shard-1");
+        shardStats.registerMBean();
+        mbeanServer = shardStats.getMBeanServer();
+        String objectName =
+            AbstractBaseMBean.BASE_JMX_PREFIX + "type=" + shardStats
+                .getMBeanType() + ",Category=" +
+                shardStats.getMBeanCategory() + ",name=" +
+                shardStats.getMBeanName();
+        testMBeanName = new ObjectName(objectName);
+    }
 
-  @After
-  public void tearDown() throws Exception {
-    shardStats.unregisterMBean();
-  }
+    @After
+    public void tearDown() throws Exception {
+        shardStats.unregisterMBean();
+    }
 
-  @Test
-  public void testGetShardName() throws Exception {
+    @Test
+    public void testGetShardName() throws Exception {
 
-    Object attribute = mbeanServer.getAttribute(testMBeanName,"ShardName");
-    Assert.assertEquals((String) attribute, "shard-1");
+        Object attribute = mbeanServer.getAttribute(testMBeanName, "ShardName");
+        Assert.assertEquals((String) attribute, "shard-1");
 
-  }
+    }
 
-  @Test
-  public void testGetCommittedTransactionsCount() throws Exception {
-    //let us increment some transactions count and then check
-    shardStats.incrementCommittedTransactionCount();
-    shardStats.incrementCommittedTransactionCount();
-    shardStats.incrementCommittedTransactionCount();
+    @Test
+    public void testGetCommittedTransactionsCount() throws Exception {
+        //let us increment some transactions count and then check
+        shardStats.incrementCommittedTransactionCount();
+        shardStats.incrementCommittedTransactionCount();
+        shardStats.incrementCommittedTransactionCount();
 
-    //now let us get from MBeanServer what is the transaction count.
-    Object attribute = mbeanServer.getAttribute(testMBeanName,"CommittedTransactionsCount");
-    Assert.assertEquals((Long) attribute, (Long)3L);
+        //now let us get from MBeanServer what is the transaction count.
+        Object attribute = mbeanServer.getAttribute(testMBeanName,
+            "CommittedTransactionsCount");
+        Assert.assertEquals((Long) attribute, (Long) 3L);
 
 
-  }
-}
\ No newline at end of file
+    }
+
+    @Test
+    public void testGetLastCommittedTransactionTime() throws Exception {
+        SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
+        Assert.assertEquals(shardStats.getLastCommittedTransactionTime(),
+            sdf.format(new Date(0L)));
+        long millis = System.currentTimeMillis();
+        shardStats.setLastCommittedTransactionTime(new Date(millis));
+
+        //now let us get from MBeanServer what is the transaction count.
+        Object attribute = mbeanServer.getAttribute(testMBeanName,
+            "LastCommittedTransactionTime");
+        Assert.assertEquals((String) attribute, sdf.format(new Date(millis)));
+        Assert.assertNotEquals((String) attribute,
+            sdf.format(new Date(millis - 1)));
+
+    }
+
+    @Test
+    public void testGetFailedTransactionsCount() throws Exception {
+        //let us increment some transactions count and then check
+        shardStats.incrementFailedTransactionsCount();
+        shardStats.incrementFailedTransactionsCount();
+
+
+        //now let us get from MBeanServer what is the transaction count.
+        Object attribute =
+            mbeanServer.getAttribute(testMBeanName, "FailedTransactionsCount");
+        Assert.assertEquals((Long) attribute, (Long) 2L);
+
+    }
+}
index d9c550a6db482d39855dc5a48ebb26b2fec8c6ad..84f3b92f1ba6dba08ae7a29992fe7d9e39b4e33c 100644 (file)
@@ -26,7 +26,8 @@ public abstract class AbstractModificationTest {
 
   @Before
   public void setUp(){
-    store = new InMemoryDOMDataStore("test", MoreExecutors.sameThreadExecutor());
+    store = new InMemoryDOMDataStore("test", MoreExecutors.sameThreadExecutor(),
+            MoreExecutors.sameThreadExecutor());
     store.onGlobalContextUpdated(TestModel.createTestContext());
   }
 
index eda1c304e42bcac73e3e8597a3028324732e223e..27b0374bacbaa6b224c52ceaf7d33a83ffa554fb 100644 (file)
@@ -1,5 +1,6 @@
 akka {
-    loggers = [akka.testkit.TestEventListener]
+    loggers = ["akka.testkit.TestEventListener", "akka.event.slf4j.Slf4jLogger"]
+
     actor {
          serializers {
                   java = "akka.serialization.JavaSerializer"
index fc251c8445a77d1485a2dffc4880afa9b3907f95..9b70f0c4d708d63b1544d05dd1948df35e209912 100644 (file)
@@ -47,4 +47,22 @@ public interface DOMDataReadTransaction extends AsyncReadTransaction<YangInstanc
      */
     CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(
             LogicalDatastoreType store, YangInstanceIdentifier path);
+
+    /**
+     * Checks if data is available in the logical data store located at provided path
+     *
+     * @param path
+     *            Path which uniquely identifies subtree which client want to
+     *            check existence of
+     * @return a CheckFuture containing the result of the check.
+     *         <ul>
+     *         <li>If the data at the supplied path exists, the Future returns a Boolean
+     *         whose value is true, false otherwise</li>
+     *         <li>If checking for the data fails, the Future will fail with a
+     *         {@link ReadFailedException} or an exception derived from ReadFailedException.</li>
+     *         </ul>
+     */
+    CheckedFuture<Boolean, ReadFailedException> exists(
+        LogicalDatastoreType store, YangInstanceIdentifier path);
+
 }
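A hedged usage sketch of the exists() call introduced above; readTx and path are illustrative names, only the method signature comes from this diff:

    // Sketch: check whether a subtree exists before reading or modifying it.
    CheckedFuture<Boolean, ReadFailedException> exists =
        readTx.exists(LogicalDatastoreType.OPERATIONAL, path);
    try {
        if (exists.checkedGet()) {
            // data is present at path
        }
    } catch (ReadFailedException e) {
        // the existence check itself failed
    }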
diff --git a/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/RpcImplementationUnavailableException.java b/opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/sal/core/api/RpcImplementationUnavailableException.java
new file mode 100644 (file)
index 0000000..3710822
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.core.api;
+
+/**
+ * Exception reported when no RPC implementation is found in the system.
+ */
+public class RpcImplementationUnavailableException extends RuntimeException {
+    private static final long serialVersionUID = 1L;
+
+    public RpcImplementationUnavailableException(final String message) {
+        super(message);
+    }
+
+    public RpcImplementationUnavailableException(final String message, final Throwable cause) {
+        super(message, cause);
+    }
+}
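This exception is handed back as the cause of a failed future by the SchemaAwareRpcBroker change later in this diff when no default implementation is registered. A hedged sketch of caller-side handling; rpcRouter, rpc, route and input are illustrative names:

    // Sketch: invoke an RPC through the broker and detect the missing-implementation case.
    ListenableFuture<RpcResult<CompositeNode>> result = rpcRouter.invokeRpc(rpc, route, input);
    try {
        RpcResult<CompositeNode> rpcResult = result.get();
        // use rpcResult
    } catch (InterruptedException | ExecutionException e) {
        if (e.getCause() instanceof RpcImplementationUnavailableException) {
            // no RPC implementation was available for this request
        }
    }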
index 22dad6af23c1464b63f8a4aa25075551b64ef686..948f3c8d8b637b8dfb72fdd376fa7ee1f49aa3c3 100644 (file)
@@ -7,18 +7,18 @@
  */
 package org.opendaylight.controller.config.yang.md.sal.dom.impl;
 
-import java.util.concurrent.Executors;
-
+import java.util.concurrent.Executor;
+import java.util.concurrent.ExecutorService;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
 import org.opendaylight.controller.md.sal.dom.broker.impl.DOMDataBrokerImpl;
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
 import org.opendaylight.controller.sal.core.spi.data.DOMStore;
 import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.opendaylight.yangtools.util.PropertyUtils;
 
 import com.google.common.collect.ImmutableMap;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
 
 /**
 *
@@ -26,6 +26,17 @@ import com.google.common.util.concurrent.MoreExecutors;
 public final class DomInmemoryDataBrokerModule extends
         org.opendaylight.controller.config.yang.md.sal.dom.impl.AbstractDomInmemoryDataBrokerModule {
 
+    private static final String FUTURE_CALLBACK_EXECUTOR_MAX_QUEUE_SIZE_PROP =
+            "mdsal.datastore-future-callback-queue.size";
+    private static final int DEFAULT_FUTURE_CALLBACK_EXECUTOR_MAX_QUEUE_SIZE = 1000;
+
+    private static final String FUTURE_CALLBACK_EXECUTOR_MAX_POOL_SIZE_PROP =
+            "mdsal.datastore-future-callback-pool.size";
+    private static final int DEFAULT_FUTURE_CALLBACK_EXECUTOR_MAX_POOL_SIZE = 20;
+    private static final String COMMIT_EXECUTOR_MAX_QUEUE_SIZE_PROP =
+            "mdsal.datastore-commit-queue.size";
+    private static final int DEFAULT_COMMIT_EXECUTOR_MAX_QUEUE_SIZE = 5000;
+
     public DomInmemoryDataBrokerModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier,
             final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
         super(identifier, dependencyResolver);
@@ -45,30 +56,55 @@ public final class DomInmemoryDataBrokerModule extends
 
     @Override
     public java.lang.AutoCloseable createInstance() {
-        ListeningExecutorService storeExecutor = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
         //Initializing Operational DOM DataStore defaulting to InMemoryDOMDataStore if one is not configured
         DOMStore operStore =  getOperationalDataStoreDependency();
         if(operStore == null){
            //we will default to InMemoryDOMDataStore creation
-          operStore = new InMemoryDOMDataStore("DOM-OPER", storeExecutor);
-          //here we will register the SchemaContext listener
-          getSchemaServiceDependency().registerSchemaContextListener((InMemoryDOMDataStore)operStore);
+          operStore = InMemoryDOMDataStoreFactory.create("DOM-OPER", getSchemaServiceDependency());
         }
 
         DOMStore configStore = getConfigDataStoreDependency();
         if(configStore == null){
            //we will default to InMemoryDOMDataStore creation
-           configStore = new InMemoryDOMDataStore("DOM-CFG", storeExecutor);
-          //here we will register the SchemaContext listener
-          getSchemaServiceDependency().registerSchemaContextListener((InMemoryDOMDataStore)configStore);
+           configStore = InMemoryDOMDataStoreFactory.create("DOM-CFG", getSchemaServiceDependency());
         }
         ImmutableMap<LogicalDatastoreType, DOMStore> datastores = ImmutableMap
                 .<LogicalDatastoreType, DOMStore> builder().put(LogicalDatastoreType.OPERATIONAL, operStore)
                 .put(LogicalDatastoreType.CONFIGURATION, configStore).build();
 
+        /*
+         * We use a single-threaded executor for commits with a bounded queue capacity. If the
+         * queue capacity is reached, subsequent commit tasks will be rejected and the commits will
+         * fail. This is done to relieve back pressure. This should be an extreme scenario - either
+         * there's deadlock(s) somewhere and the controller is unstable or some rogue component is
+         * continuously hammering commits too fast or the controller is just over-capacity for the
+         * system it's running on.
+         */
+        ExecutorService commitExecutor = SpecialExecutors.newBoundedSingleThreadExecutor(
+                PropertyUtils.getIntSystemProperty(
+                        COMMIT_EXECUTOR_MAX_QUEUE_SIZE_PROP,
+                        DEFAULT_COMMIT_EXECUTOR_MAX_QUEUE_SIZE), "WriteTxCommit");
+
+        /*
+         * We use an executor for commit ListenableFuture callbacks that favors reusing available
+         * threads over creating new threads at the expense of execution time. The assumption is
+         * that most ListenableFuture callbacks won't execute a lot of business logic where we want
+         * it to run quicker - many callbacks will likely just handle error conditions and do
+         * nothing on success. The executor queue capacity is bounded and, if the capacity is
+         * reached, subsequent submitted tasks will block the caller.
+         */
+        Executor listenableFutureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(
+                PropertyUtils.getIntSystemProperty(
+                        FUTURE_CALLBACK_EXECUTOR_MAX_POOL_SIZE_PROP,
+                        DEFAULT_FUTURE_CALLBACK_EXECUTOR_MAX_POOL_SIZE),
+                PropertyUtils.getIntSystemProperty(
+                        FUTURE_CALLBACK_EXECUTOR_MAX_QUEUE_SIZE_PROP,
+                        DEFAULT_FUTURE_CALLBACK_EXECUTOR_MAX_QUEUE_SIZE), "CommitFutures");
+
         DOMDataBrokerImpl newDataBroker = new DOMDataBrokerImpl(datastores,
-                new DeadlockDetectingListeningExecutorService(Executors.newSingleThreadExecutor(),
-                                              TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION));
+                new DeadlockDetectingListeningExecutorService(commitExecutor,
+                    TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION,
+                    listenableFutureExecutor));
 
         return newDataBroker;
     }
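Since the pool and queue bounds above are resolved through PropertyUtils.getIntSystemProperty, they can be tuned without code changes. A hedged example of overriding the defaults, using the property names defined in this hunk with purely illustrative values:

    // Sketch: equivalent to passing -Dmdsal.datastore-commit-queue.size=10000 (etc.) to the JVM,
    // set before the data broker module creates its executors.
    System.setProperty("mdsal.datastore-commit-queue.size", "10000");
    System.setProperty("mdsal.datastore-future-callback-pool.size", "30");
    System.setProperty("mdsal.datastore-future-callback-queue.size", "2000");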
index 9a6d12fb1872fc0a96556b06133a48894e14a532..521e2d0e731af06ac972ce2cce28f75a347ba490 100644 (file)
@@ -9,6 +9,7 @@ package org.opendaylight.controller.md.sal.dom.broker.impl;
 import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.RejectedExecutionException;
 
 import javax.annotation.concurrent.GuardedBy;
 
@@ -86,8 +87,18 @@ public class DOMDataCommitCoordinatorImpl implements DOMDataCommitExecutor {
         Preconditions.checkArgument(cohorts != null, "Cohorts must not be null.");
         Preconditions.checkArgument(listener != null, "Listener must not be null");
         LOG.debug("Tx: {} is submitted for execution.", transaction.getIdentifier());
-        ListenableFuture<Void> commitFuture = executor.submit(new CommitCoordinationTask(
-                transaction, cohorts, listener));
+
+        ListenableFuture<Void> commitFuture = null;
+        try {
+            commitFuture = executor.submit(new CommitCoordinationTask(transaction, cohorts, listener));
+        } catch(RejectedExecutionException e) {
+            LOG.error("The commit executor's queue is full - submit task was rejected. \n" +
+                      executor, e);
+            return Futures.immediateFailedCheckedFuture(
+                    new TransactionCommitFailedException(
+                        "Could not submit the commit task - the commit queue capacity has been exceeded.", e));
+        }
+
         if (listener.isPresent()) {
             Futures.addCallback(commitFuture, new DOMDataCommitErrorInvoker(transaction, listener.get()));
         }
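With this change an overflowing commit queue no longer escapes as an unchecked RejectedExecutionException; the caller sees the failure on the returned future instead, as exercised by the testRejectedCommit case added to DOMBrokerTest further down. A minimal caller-side sketch with illustrative transaction and path names:

    // Sketch: a commit rejected by the bounded executor surfaces as a checked failure.
    DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
    writeTx.put(LogicalDatastoreType.OPERATIONAL, path, data);
    try {
        writeTx.submit().checkedGet();
    } catch (TransactionCommitFailedException e) {
        // the commit task could not be queued; back off, retry, or report the error
    }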
index b4562cf2eccd2c8b6560552c510fe1bfeb8d9989..5e2a417d28ce22acc6a19a1f556f7ea2c95a4382 100644 (file)
@@ -40,6 +40,12 @@ class DOMForwardedReadOnlyTransaction extends
         return getSubtransaction(store).read(path);
     }
 
+    @Override public CheckedFuture<Boolean, ReadFailedException> exists(
+        LogicalDatastoreType store,
+        YangInstanceIdentifier path) {
+        return getSubtransaction(store).exists(path);
+    }
+
     @Override
     public void close() {
         closeSubtransactions();
index 74a4c52e36ff33883754b0ef5543706054134967..67351ec94583cda374f5b0948eafd1089fd98510 100644 (file)
@@ -50,4 +50,10 @@ class DOMForwardedReadWriteTransaction extends DOMForwardedWriteTransaction<DOMS
             final LogicalDatastoreType store, final YangInstanceIdentifier path) {
         return getSubtransaction(store).read(path);
     }
-}
\ No newline at end of file
+
+    @Override public CheckedFuture<Boolean, ReadFailedException> exists(
+        LogicalDatastoreType store,
+        YangInstanceIdentifier path) {
+        return getSubtransaction(store).exists(path);
+    }
+}
index 5bd8a7bc021f935fd20251892101f861e2c2db39..fb72b5a99a130efbcfcd0ea04056ef61978f2e06 100644 (file)
@@ -15,13 +15,6 @@ import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.JdkFutureAdapters;
 import com.google.common.util.concurrent.ListenableFuture;
-
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
-import javax.annotation.Nullable;
-
 import org.opendaylight.controller.md.sal.common.api.RegistrationListener;
 import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
 import org.opendaylight.controller.md.sal.common.api.data.DataCommitHandler;
@@ -78,6 +71,12 @@ import org.opendaylight.yangtools.yang.model.api.Module;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
 
+import javax.annotation.Nullable;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+
 public class BackwardsCompatibleMountPoint implements MountProvisionInstance, SchemaContextProvider, SchemaService {
 
     private final DataProviderService dataReader;
@@ -405,6 +404,16 @@ public class BackwardsCompatibleMountPoint implements MountProvisionInstance, Sc
                 final Optional<NormalizedNode<?, ?>> normalizedNodeOptional = Optional.<NormalizedNode<?, ?>>fromNullable(normalized.getValue());
                 return Futures.immediateCheckedFuture(normalizedNodeOptional);
             }
+
+            @Override public CheckedFuture<Boolean, ReadFailedException> exists(LogicalDatastoreType store,
+                YangInstanceIdentifier path) {
+
+                try {
+                    return Futures.immediateCheckedFuture(read(store, path).get().isPresent());
+                } catch (InterruptedException | ExecutionException e) {
+                    return Futures.immediateFailedCheckedFuture(new ReadFailedException("Exists failed",e));
+                }
+            }
         }
 
         @VisibleForTesting
@@ -518,6 +527,16 @@ public class BackwardsCompatibleMountPoint implements MountProvisionInstance, Sc
                 return new BackwardsCompatibleReadTransaction(dataReader, dataNormalizer).read(store, path);
             }
 
+            @Override public CheckedFuture<Boolean, ReadFailedException> exists(LogicalDatastoreType store,
+                YangInstanceIdentifier path) {
+
+                try {
+                    return Futures.immediateCheckedFuture(read(store, path).get().isPresent());
+                } catch (InterruptedException | ExecutionException e) {
+                    return Futures.immediateFailedCheckedFuture(new ReadFailedException("Exists failed",e));
+                }
+            }
+
             @Override
             public boolean cancel() {
                 return delegateWriteTx.cancel();
index 19ff03b7d2ca14fa4facb72d855bbf2b14f77b7d..c8e3c0b6e0ca535e4e10ab464c47c2f00c8f220a 100644 (file)
@@ -8,7 +8,9 @@
 package org.opendaylight.controller.sal.dom.broker.impl;
 
 import static com.google.common.base.Preconditions.checkArgument;
-import static com.google.common.base.Preconditions.checkState;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.ListenableFuture;
 
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
@@ -22,11 +24,8 @@ import org.opendaylight.yangtools.concepts.Identifiable;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.SimpleNode;
-
-import com.google.common.collect.ImmutableSet;
-import com.google.common.util.concurrent.ListenableFuture;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 
 class RoutedRpcSelector implements RpcImplementation, AutoCloseable, Identifiable<RpcRoutingContext> {
 
@@ -81,9 +80,9 @@ class RoutedRpcSelector implements RpcImplementation, AutoCloseable, Identifiabl
         }
         if (potential == null) {
             return router.invokeRpc(rpc, (YangInstanceIdentifier) route, input);
+        } else {
+            return potential.invokeRpc(rpc, input);
         }
-        checkState(potential != null, "No implementation is available for rpc:%s path:%s", rpc, route);
-        return potential.invokeRpc(rpc, input);
     }
 
     public void addPath(final QName context, final YangInstanceIdentifier path, final RoutedRpcRegImpl routedRpcRegImpl) {
index 1ba6594563b0942882a26d7d93ee136c33742dd5..b4d7d2d00109b394f36c1ef138176da4fda364ab 100644 (file)
@@ -10,6 +10,13 @@ package org.opendaylight.controller.sal.dom.broker.impl;
 import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkState;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.FluentIterable;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
@@ -22,6 +29,7 @@ import org.opendaylight.controller.sal.core.api.Broker.RoutedRpcRegistration;
 import org.opendaylight.controller.sal.core.api.Broker.RpcRegistration;
 import org.opendaylight.controller.sal.core.api.RoutedRpcDefaultImplementation;
 import org.opendaylight.controller.sal.core.api.RpcImplementation;
+import org.opendaylight.controller.sal.core.api.RpcImplementationUnavailableException;
 import org.opendaylight.controller.sal.core.api.RpcRegistrationListener;
 import org.opendaylight.controller.sal.core.api.RpcRoutingContext;
 import org.opendaylight.controller.sal.dom.broker.spi.RpcRouter;
@@ -38,12 +46,9 @@ import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.FluentIterable;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.ImmutableSet;
-import com.google.common.util.concurrent.ListenableFuture;
-
+/**
+ * RPC broker responsible for routing requests to remote systems.
+ */
 public class SchemaAwareRpcBroker implements RpcRouter, Identifiable<String>, RoutedRpcDefaultImplementation {
 
     private static final Logger LOG = LoggerFactory.getLogger(SchemaAwareRpcBroker.class);
@@ -217,8 +222,12 @@ public class SchemaAwareRpcBroker implements RpcRouter, Identifiable<String>, Ro
 
     @Override
     public ListenableFuture<RpcResult<CompositeNode>> invokeRpc(final QName rpc, final YangInstanceIdentifier route, final CompositeNode input) {
-      checkState(defaultDelegate != null, "No implementation is available for rpc:%s path:%s", rpc, route);
-      return defaultDelegate.invokeRpc(rpc, route, input);
+        if (defaultDelegate == null) {
+            return Futures.immediateFailedCheckedFuture(new RpcImplementationUnavailableException("No RPC implementation found"));
+        }
+
+        LOG.debug("Forwarding RPC {} path {} to delegate {}", rpc, route);
+        return defaultDelegate.invokeRpc(rpc, route, input);
     }
 
     void remove(final GlobalRpcRegistration registration) {
index 181396fc884699e07eef9e0778e1ce3655d5e721..e9ed5b1b303592c9f8b59d0a7a1145bebeb3e716 100644 (file)
@@ -63,8 +63,10 @@ public class DOMBrokerPerformanceTest {
 
     @Before
     public void setupStore() {
-       InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
-       InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.sameThreadExecutor());
+        InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER",
+                 MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
+        InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG",
+                 MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
         schemaContext = TestModel.createTestContext();
 
         operStore.onGlobalContextUpdated(schemaContext);
index 0bb16a39b90f7eb513093b18faa20815061fad3c..e57d08f1737fde07dc455eabfc53c2e5304cd53f 100644 (file)
@@ -7,19 +7,24 @@ import static org.junit.Assert.assertEquals;
 import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
 import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
 
+import java.util.Collections;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitDeadlockException;
+import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataChangeListener;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataReadTransaction;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataReadWriteTransaction;
@@ -28,6 +33,7 @@ import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
 import org.opendaylight.controller.md.sal.dom.store.impl.TestModel;
 import org.opendaylight.controller.sal.core.spi.data.DOMStore;
 import org.opendaylight.yangtools.util.concurrent.DeadlockDetectingListeningExecutorService;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
@@ -35,6 +41,7 @@ import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
 import com.google.common.base.Optional;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.util.concurrent.ForwardingExecutorService;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
@@ -46,11 +53,16 @@ public class DOMBrokerTest {
     private SchemaContext schemaContext;
     private DOMDataBrokerImpl domBroker;
     private ListeningExecutorService executor;
+    private ExecutorService futureExecutor;
+    private CommitExecutorService commitExecutor;
 
     @Before
     public void setupStore() {
-        InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
-        InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.sameThreadExecutor());
+
+        InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER",
+                MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
+        InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG",
+                MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
         schemaContext = TestModel.createTestContext();
 
         operStore.onGlobalContextUpdated(schemaContext);
@@ -61,8 +73,10 @@ public class DOMBrokerTest {
                 .put(OPERATIONAL, operStore) //
                 .build();
 
-        executor = new DeadlockDetectingListeningExecutorService(Executors.newSingleThreadExecutor(),
-                                          TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION);
+        commitExecutor = new CommitExecutorService(Executors.newSingleThreadExecutor());
+        futureExecutor = SpecialExecutors.newBlockingBoundedCachedThreadPool(1, 5, "FCB");
+        executor = new DeadlockDetectingListeningExecutorService(commitExecutor,
+                TransactionCommitDeadlockException.DEADLOCK_EXECUTOR_FUNCTION, futureExecutor);
         domBroker = new DOMDataBrokerImpl(stores, executor);
     }
 
@@ -71,6 +85,10 @@ public class DOMBrokerTest {
         if( executor != null ) {
             executor.shutdownNow();
         }
+
+        if(futureExecutor != null) {
+            futureExecutor.shutdownNow();
+        }
     }
 
     @Test(timeout=10000)
@@ -137,6 +155,24 @@ public class DOMBrokerTest {
         assertTrue(afterCommitRead.isPresent());
     }
 
+    @Test(expected=TransactionCommitFailedException.class)
+    public void testRejectedCommit() throws Exception {
+
+        commitExecutor.delegate = Mockito.mock( ExecutorService.class );
+        Mockito.doThrow( new RejectedExecutionException( "mock" ) )
+            .when( commitExecutor.delegate ).execute( Mockito.any( Runnable.class ) );
+        Mockito.doNothing().when( commitExecutor.delegate ).shutdown();
+        Mockito.doReturn( Collections.emptyList() ).when( commitExecutor.delegate ).shutdownNow();
+        Mockito.doReturn( "" ).when( commitExecutor.delegate ).toString();
+        Mockito.doReturn( true ).when( commitExecutor.delegate )
+            .awaitTermination( Mockito.anyLong(), Mockito.any( TimeUnit.class ) );
+
+        DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
+        writeTx.put( OPERATIONAL, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME) );
+
+        writeTx.submit().checkedGet( 5, TimeUnit.SECONDS );
+    }
+
     /**
      * Tests a simple DataChangeListener notification after a write.
      */
@@ -306,4 +342,18 @@ public class DOMBrokerTest {
             assertTrue( "onDataChanged was not called", latch.await( 5, TimeUnit.SECONDS ) );
         }
     }
+
+    static class CommitExecutorService extends ForwardingExecutorService {
+
+        ExecutorService delegate;
+
+        public CommitExecutorService( ExecutorService delegate ) {
+            this.delegate = delegate;
+        }
+
+        @Override
+        protected ExecutorService delegate() {
+            return delegate;
+        }
+    }
 }
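
Note: the testRejectedCommit case above exercises the failure path through submit().checkedGet(). A minimal caller-side sketch of the same contract (writeTx and the imports mirror the test above; this is illustrative only, not part of the patch):

    DOMDataReadWriteTransaction writeTx = domBroker.newReadWriteTransaction();
    writeTx.put(OPERATIONAL, TestModel.TEST_PATH, ImmutableNodes.containerNode(TestModel.TEST_QNAME));
    try {
        writeTx.submit().checkedGet(5, TimeUnit.SECONDS);
    } catch (TransactionCommitFailedException e) {
        // The commit task was rejected or failed; the data was not applied.
    } catch (TimeoutException e) {
        // The commit did not complete within the timeout.
    }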
index 3ea0bcefa5bab97ea12a9ead64e40cce49b78277..18b11c8300ab37a526a2018c1285a9a9817f7d3a 100644 (file)
@@ -44,8 +44,10 @@ public class DOMTransactionChainTest {
 
     @Before
     public void setupStore() {
-        InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER", MoreExecutors.sameThreadExecutor());
-        InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG", MoreExecutors.sameThreadExecutor());
+        InMemoryDOMDataStore operStore = new InMemoryDOMDataStore("OPER",
+                MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
+        InMemoryDOMDataStore configStore = new InMemoryDOMDataStore("CFG",
+                MoreExecutors.sameThreadExecutor(), MoreExecutors.sameThreadExecutor());
         schemaContext = TestModel.createTestContext();
 
         operStore.onGlobalContextUpdated(schemaContext);
index 84d09c7cb024a275ebfdfa6edd8a75af00abc3af..719a6f0499d4423040e5547b82a3bc957c30abbe 100644 (file)
@@ -34,4 +34,20 @@ public interface DOMStoreReadTransaction extends DOMStoreTransaction {
      *         </ul>
      */
     CheckedFuture<Optional<NormalizedNode<?,?>>, ReadFailedException> read(YangInstanceIdentifier path);
+
+    /**
+     * Checks if data is available in the logical data store located at the provided path.
+     *
+     * @param path
+     *            Path which uniquely identifies the subtree whose existence the
+     *            client wants to check
+     * @return a CheckedFuture containing the result of the check.
+     *         <ul>
+     *         <li>If the data at the supplied path exists, the Future returns a Boolean
+     *         whose value is true, otherwise false</li>
+     *         <li>If checking for the data fails, the Future will fail with a
+     *         {@link ReadFailedException} or an exception derived from ReadFailedException.</li>
+     *         </ul>
+     */
+    CheckedFuture<Boolean, ReadFailedException> exists(YangInstanceIdentifier path);
 }
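
Note: a caller-side sketch of the new exists() contract (the store variable is an assumed DOMStore; TestModel.TEST_PATH is the path used by the tests elsewhere in this patch):

    DOMStoreReadTransaction readTx = store.newReadOnlyTransaction();
    try {
        boolean present = readTx.exists(TestModel.TEST_PATH).checkedGet();
        // present is true only if a node exists at the supplied path.
    } catch (ReadFailedException e) {
        // The existence check itself failed, e.g. the transaction was already closed.
    }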
index 805608d479e0e76d1ecebb6a7398a884e11498cb..39a448ff6c861ac4db998bf0d17f4ec4d8e22d6d 100644 (file)
@@ -1,12 +1,9 @@
 package org.opendaylight.controller.config.yang.inmemory_datastore_provider;
 
-import java.util.concurrent.Executors;
-
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-
-import com.google.common.util.concurrent.MoreExecutors;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
 
 public class InMemoryConfigDataStoreProviderModule extends org.opendaylight.controller.config.yang.inmemory_datastore_provider.AbstractInMemoryConfigDataStoreProviderModule {
+
     public InMemoryConfigDataStoreProviderModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
         super(identifier, dependencyResolver);
     }
@@ -22,9 +19,7 @@ public class InMemoryConfigDataStoreProviderModule extends org.opendaylight.cont
 
     @Override
     public java.lang.AutoCloseable createInstance() {
-      InMemoryDOMDataStore   ids = new InMemoryDOMDataStore("DOM-CFG", MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor()));
-      getSchemaServiceDependency().registerSchemaContextListener(ids);
-      return ids;
+        return InMemoryDOMDataStoreFactory.create("DOM-CFG", getSchemaServiceDependency());
     }
 
 }
index f4795588ab61ef62a9f74f6eb1530da1b6307141..615fe0211c0cbba8c1bc5c1a5687dd1ccea8dc33 100644 (file)
@@ -1,12 +1,9 @@
 package org.opendaylight.controller.config.yang.inmemory_datastore_provider;
 
-import java.util.concurrent.Executors;
-
-import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
-
-import com.google.common.util.concurrent.MoreExecutors;
+import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
 
 public class InMemoryOperationalDataStoreProviderModule extends org.opendaylight.controller.config.yang.inmemory_datastore_provider.AbstractInMemoryOperationalDataStoreProviderModule {
+
     public InMemoryOperationalDataStoreProviderModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
         super(identifier, dependencyResolver);
     }
@@ -22,9 +19,7 @@ public class InMemoryOperationalDataStoreProviderModule extends org.opendaylight
 
     @Override
     public java.lang.AutoCloseable createInstance() {
-      InMemoryDOMDataStore ids = new InMemoryDOMDataStore("DOM-OPER", MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor()));
-      getOperationalSchemaServiceDependency().registerSchemaContextListener(ids);
-      return ids;
+        return InMemoryDOMDataStoreFactory.create("DOM-OPER", getOperationalSchemaServiceDependency());
     }
 
 }
index 27325d84a9ddf3ecf11133ab2d55137477b3fe25..ac1f2e32d531dca0074290673d9399bb90074fd9 100644 (file)
@@ -8,6 +8,8 @@
 package org.opendaylight.controller.md.sal.dom.store.impl;
 
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.yangtools.util.concurrent.NotificationManager;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.slf4j.Logger;
@@ -16,31 +18,33 @@ import org.slf4j.LoggerFactory;
 class ChangeListenerNotifyTask implements Runnable {
 
     private static final Logger LOG = LoggerFactory.getLogger(ChangeListenerNotifyTask.class);
+
     private final Iterable<? extends DataChangeListenerRegistration<?>> listeners;
     private final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> event;
 
+    @SuppressWarnings("rawtypes")
+    private final NotificationManager<AsyncDataChangeListener,AsyncDataChangeEvent>
+                                                                            notificationMgr;
+
+    @SuppressWarnings("rawtypes")
     public ChangeListenerNotifyTask(final Iterable<? extends DataChangeListenerRegistration<?>> listeners,
-            final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> event) {
+            final AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> event,
+            final NotificationManager<AsyncDataChangeListener,AsyncDataChangeEvent> notificationMgr) {
         this.listeners = listeners;
         this.event = event;
+        this.notificationMgr = notificationMgr;
     }
 
     @Override
     public void run() {
 
         for (DataChangeListenerRegistration<?> listener : listeners) {
-            try {
-                listener.getInstance().onDataChanged(event);
-            } catch (Exception e) {
-                LOG.error("Unhandled exception during invoking listener {} with event {}", listener, event, e);
-            }
+            notificationMgr.submitNotification(listener.getInstance(), event);
         }
-
     }
 
     @Override
     public String toString() {
         return "ChangeListenerNotifyTask [listeners=" + listeners + ", event=" + event + "]";
     }
-
 }
index c44d0909d688773d0cc37034cae21541f0823e6f..b61b3671034601fc09d7658280f986d4d30cc3ce 100644 (file)
@@ -13,11 +13,17 @@ import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
+
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
 import org.opendaylight.controller.md.sal.common.api.data.OptimisticLockFailedException;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
 import org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
+import org.opendaylight.yangtools.util.ExecutorServiceUtil;
+import org.opendaylight.yangtools.util.PropertyUtils;
+import org.opendaylight.yangtools.util.concurrent.NotificationManager;
+import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.ConflictingModificationAppliedException;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTree;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
@@ -43,8 +49,11 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.concurrent.GuardedBy;
+
 import java.util.Collections;
 import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
 import static com.google.common.base.Preconditions.checkState;
@@ -61,16 +70,51 @@ import static com.google.common.base.Preconditions.checkState;
 public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, SchemaContextListener,
         TransactionReadyPrototype,AutoCloseable {
     private static final Logger LOG = LoggerFactory.getLogger(InMemoryDOMDataStore.class);
+
+    @SuppressWarnings("rawtypes")
+    private static final QueuedNotificationManager.Invoker<AsyncDataChangeListener,
+                                       AsyncDataChangeEvent> DCL_NOTIFICATION_MGR_INVOKER =
+            new QueuedNotificationManager.Invoker<AsyncDataChangeListener,
+                                                  AsyncDataChangeEvent>() {
+
+                @SuppressWarnings("unchecked")
+                @Override
+                public void invokeListener( AsyncDataChangeListener listener,
+                                            AsyncDataChangeEvent notification ) {
+                    listener.onDataChanged(notification);
+                }
+            };
+
+    private static final String DCL_NOTIFICATION_MGR_MAX_QUEUE_SIZE_PROP =
+            "mdsal.datastore-dcl-notification-queue.size";
+
+    private static final int DEFAULT_DCL_NOTIFICATION_MGR_MAX_QUEUE_SIZE = 1000;
+
     private final DataTree dataTree = InMemoryDataTreeFactory.getInstance().create();
     private final ListenerTree listenerTree = ListenerTree.create();
     private final AtomicLong txCounter = new AtomicLong(0);
-    private final ListeningExecutorService executor;
+    private final ListeningExecutorService listeningExecutor;
+
+    @SuppressWarnings("rawtypes")
+    private final NotificationManager<AsyncDataChangeListener,AsyncDataChangeEvent>
+                                                              dataChangeListenerNotificationManager;
+    private final ExecutorService dataChangeListenerExecutor;
 
     private final String name;
 
-    public InMemoryDOMDataStore(final String name, final ListeningExecutorService executor) {
+    public InMemoryDOMDataStore(final String name, final ListeningExecutorService listeningExecutor,
+            final ExecutorService dataChangeListenerExecutor) {
         this.name = Preconditions.checkNotNull(name);
-        this.executor = Preconditions.checkNotNull(executor);
+        this.listeningExecutor = Preconditions.checkNotNull(listeningExecutor);
+
+        this.dataChangeListenerExecutor = Preconditions.checkNotNull(dataChangeListenerExecutor);
+
+        int maxDCLQueueSize = PropertyUtils.getIntSystemProperty(
+                DCL_NOTIFICATION_MGR_MAX_QUEUE_SIZE_PROP, DEFAULT_DCL_NOTIFICATION_MGR_MAX_QUEUE_SIZE );
+
+        dataChangeListenerNotificationManager =
+                new QueuedNotificationManager<>(this.dataChangeListenerExecutor,
+                        DCL_NOTIFICATION_MGR_INVOKER, maxDCLQueueSize, "DataChangeListenerQueueMgr");
     }
 
     @Override
@@ -104,8 +148,9 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
     }
 
     @Override
-    public void close(){
-        executor.shutdownNow();
+    public void close() {
+        ExecutorServiceUtil.tryGracefulShutdown(listeningExecutor, 30, TimeUnit.SECONDS);
+        ExecutorServiceUtil.tryGracefulShutdown(dataChangeListenerExecutor, 30, TimeUnit.SECONDS);
     }
     @Override
     public <L extends AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> ListenerRegistration<L> registerChangeListener(
@@ -132,7 +177,9 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
                         .setAfter(data) //
                         .addCreated(path, data) //
                         .build();
-                executor.submit(new ChangeListenerNotifyTask(Collections.singletonList(reg), event));
+
+                new ChangeListenerNotifyTask(Collections.singletonList(reg), event,
+                        dataChangeListenerNotificationManager).run();
             }
         }
 
@@ -221,8 +268,9 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
         @Override
         public void close() {
 
-             executor.shutdownNow();
-
+            // FIXME: this call doesn't look right here - listeningExecutor is shared and owned
+            // by the outer class.
+            //listeningExecutor.shutdownNow();
         }
 
         protected synchronized void onTransactionFailed(final SnapshotBackedWriteTransaction transaction,
@@ -308,7 +356,7 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
 
         @Override
         public ListenableFuture<Boolean> canCommit() {
-            return executor.submit(new Callable<Boolean>() {
+            return listeningExecutor.submit(new Callable<Boolean>() {
                 @Override
                 public Boolean call() throws TransactionCommitFailedException {
                     try {
@@ -330,11 +378,12 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
 
         @Override
         public ListenableFuture<Void> preCommit() {
-            return executor.submit(new Callable<Void>() {
+            return listeningExecutor.submit(new Callable<Void>() {
                 @Override
                 public Void call() {
                     candidate = dataTree.prepare(modification);
-                    listenerResolver = ResolveDataChangeEventsTask.create(candidate, listenerTree);
+                    listenerResolver = ResolveDataChangeEventsTask.create(candidate, listenerTree,
+                            dataChangeListenerNotificationManager);
                     return null;
                 }
             });
@@ -359,7 +408,7 @@ public class InMemoryDOMDataStore implements DOMStore, Identifiable<String>, Sch
 
                 for (ChangeListenerNotifyTask task : listenerResolver.call()) {
                     LOG.trace("Scheduling invocation of listeners: {}", task);
-                    executor.submit(task);
+                    task.run();
                 }
             }
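Note: taken together, the changes in this file route every listener callback through a QueuedNotificationManager instead of invoking listeners inline on the commit executor. Condensed into one sketch (dclExecutor, maxQueueSize, listenerReg and changeEvent are illustrative stand-ins for the fields and locals above):

    NotificationManager<AsyncDataChangeListener, AsyncDataChangeEvent> mgr =
            new QueuedNotificationManager<>(dclExecutor, DCL_NOTIFICATION_MGR_INVOKER,
                    maxQueueSize, "DataChangeListenerQueueMgr");

    // ChangeListenerNotifyTask no longer calls onDataChanged() directly; it enqueues:
    mgr.submitNotification(listenerReg.getInstance(), changeEvent);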
 
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreFactory.java b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStoreFactory.java
new file mode 100644 (file)
index 0000000..c853a13
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.sal.dom.store.impl;
+
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import javax.annotation.Nullable;
+
+import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
+import org.opendaylight.yangtools.util.PropertyUtils;
+import com.google.common.util.concurrent.MoreExecutors;
+
+/**
+ * A factory for creating InMemoryDOMDataStore instances.
+ *
+ * @author Thomas Pantelis
+ */
+public final class InMemoryDOMDataStoreFactory {
+
+    private static final String DCL_EXECUTOR_MAX_QUEUE_SIZE_PROP =
+            "mdsal.datastore-dcl-notification-queue.size";
+    private static final int DEFAULT_DCL_EXECUTOR_MAX_QUEUE_SIZE = 1000;
+
+    private static final String DCL_EXECUTOR_MAX_POOL_SIZE_PROP =
+            "mdsal.datastore-dcl-notification-pool.size";
+    private static final int DEFAULT_DCL_EXECUTOR_MAX_POOL_SIZE = 20;
+
+    private InMemoryDOMDataStoreFactory() {
+    }
+
+    /**
+     * Creates an InMemoryDOMDataStore instance.
+     *
+     * @param name the name of the data store
+     * @param schemaService the SchemaService to which to register the data store.
+     * @return an InMemoryDOMDataStore instance
+     */
+    public static InMemoryDOMDataStore create(final String name,
+            @Nullable final SchemaService schemaService) {
+
+        // For DataChangeListener notifications we use an executor that provides the fastest
+        // task execution time to get higher throughput as DataChangeListeners typically provide
+        // much of the business logic for a data model. If the executor queue size limit is reached,
+        // subsequent submitted notifications will block the calling thread.
+
+        int dclExecutorMaxQueueSize = PropertyUtils.getIntSystemProperty(
+                DCL_EXECUTOR_MAX_QUEUE_SIZE_PROP, DEFAULT_DCL_EXECUTOR_MAX_QUEUE_SIZE);
+        int dclExecutorMaxPoolSize = PropertyUtils.getIntSystemProperty(
+                DCL_EXECUTOR_MAX_POOL_SIZE_PROP, DEFAULT_DCL_EXECUTOR_MAX_POOL_SIZE);
+
+        ExecutorService dataChangeListenerExecutor = SpecialExecutors.newBlockingBoundedFastThreadPool(
+                dclExecutorMaxPoolSize, dclExecutorMaxQueueSize, name + "-DCL" );
+
+        InMemoryDOMDataStore dataStore = new InMemoryDOMDataStore(name,
+                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor()),
+                dataChangeListenerExecutor);
+
+        if(schemaService != null) {
+            schemaService.registerSchemaContextListener(dataStore);
+        }
+
+        return dataStore;
+    }
+}
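Note: a brief usage sketch of the factory (the schemaService reference is assumed to be the SchemaService dependency shown in the provider modules above; the property values are examples only):

    // Passing a SchemaService registers the store as a schema context listener;
    // passing null skips that registration.
    InMemoryDOMDataStore operStore = InMemoryDOMDataStoreFactory.create("DOM-OPER", schemaService);

    // The DataChangeListener notification executor can be tuned via system properties, e.g.:
    //   -Dmdsal.datastore-dcl-notification-queue.size=2000
    //   -Dmdsal.datastore-dcl-notification-pool.size=10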
index 3ddf0b60faf07323f7f26cb4d7488851015ef688..d8feaa71f6ac104132f14c0659677f566ee530c5 100644 (file)
@@ -24,12 +24,15 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.concurrent.Callable;
 
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
 import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.Builder;
 import org.opendaylight.controller.md.sal.dom.store.impl.DOMImmutableDataChangeEvent.SimpleEventFactory;
 import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree;
 import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.Node;
 import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.Walker;
+import org.opendaylight.yangtools.util.concurrent.NotificationManager;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
@@ -57,9 +60,15 @@ final class ResolveDataChangeEventsTask implements Callable<Iterable<ChangeListe
     private final DataTreeCandidate candidate;
     private final ListenerTree listenerRoot;
 
-    public ResolveDataChangeEventsTask(final DataTreeCandidate candidate, final ListenerTree listenerTree) {
+    @SuppressWarnings("rawtypes")
+    private final NotificationManager<AsyncDataChangeListener, AsyncDataChangeEvent> notificationMgr;
+
+    @SuppressWarnings("rawtypes")
+    public ResolveDataChangeEventsTask(final DataTreeCandidate candidate, final ListenerTree listenerTree,
+            final NotificationManager<AsyncDataChangeListener, AsyncDataChangeEvent> notificationMgr) {
         this.candidate = Preconditions.checkNotNull(candidate);
         this.listenerRoot = Preconditions.checkNotNull(listenerTree);
+        this.notificationMgr = Preconditions.checkNotNull(notificationMgr);
     }
 
     /**
@@ -120,7 +129,7 @@ final class ResolveDataChangeEventsTask implements Callable<Iterable<ChangeListe
      * @param listeners
      * @param entries
      */
-    private static void addNotificationTask(final ImmutableList.Builder<ChangeListenerNotifyTask> taskListBuilder,
+    private void addNotificationTask(final ImmutableList.Builder<ChangeListenerNotifyTask> taskListBuilder,
             final ListenerTree.Node listeners, final Collection<DOMImmutableDataChangeEvent> entries) {
 
         if (!entries.isEmpty()) {
@@ -141,7 +150,7 @@ final class ResolveDataChangeEventsTask implements Callable<Iterable<ChangeListe
      * @param listeners
      * @param event
      */
-    private static void addNotificationTaskByScope(
+    private void addNotificationTaskByScope(
             final ImmutableList.Builder<ChangeListenerNotifyTask> taskListBuilder, final ListenerTree.Node listeners,
             final DOMImmutableDataChangeEvent event) {
         DataChangeScope eventScope = event.getScope();
@@ -150,11 +159,11 @@ final class ResolveDataChangeEventsTask implements Callable<Iterable<ChangeListe
             List<DataChangeListenerRegistration<?>> listenerSet = Collections
                     .<DataChangeListenerRegistration<?>> singletonList(listenerReg);
             if (eventScope == DataChangeScope.BASE) {
-                taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event));
+                taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event, notificationMgr));
             } else if (eventScope == DataChangeScope.ONE && listenerScope != DataChangeScope.BASE) {
-                taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event));
+                taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event, notificationMgr));
             } else if (eventScope == DataChangeScope.SUBTREE && listenerScope == DataChangeScope.SUBTREE) {
-                taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event));
+                taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event, notificationMgr));
             }
         }
     }
@@ -172,7 +181,7 @@ final class ResolveDataChangeEventsTask implements Callable<Iterable<ChangeListe
      * @param listeners
      * @param entries
      */
-    private static void addNotificationTasksAndMergeEvents(
+    private void addNotificationTasksAndMergeEvents(
             final ImmutableList.Builder<ChangeListenerNotifyTask> taskListBuilder, final ListenerTree.Node listeners,
             final Collection<DOMImmutableDataChangeEvent> entries) {
 
@@ -210,14 +219,14 @@ final class ResolveDataChangeEventsTask implements Callable<Iterable<ChangeListe
         }
     }
 
-    private static void addNotificationTaskExclusively(
+    private void addNotificationTaskExclusively(
             final ImmutableList.Builder<ChangeListenerNotifyTask> taskListBuilder, final Node listeners,
             final DOMImmutableDataChangeEvent event) {
         for (DataChangeListenerRegistration<?> listener : listeners.getListeners()) {
             if (listener.getScope() == event.getScope()) {
                 Set<DataChangeListenerRegistration<?>> listenerSet = Collections
                         .<DataChangeListenerRegistration<?>> singleton(listener);
-                taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event));
+                taskListBuilder.add(new ChangeListenerNotifyTask(listenerSet, event, notificationMgr));
             }
         }
     }
@@ -519,7 +528,10 @@ final class ResolveDataChangeEventsTask implements Callable<Iterable<ChangeListe
         }
     }
 
-    public static ResolveDataChangeEventsTask create(final DataTreeCandidate candidate, final ListenerTree listenerTree) {
-        return new ResolveDataChangeEventsTask(candidate, listenerTree);
+    @SuppressWarnings("rawtypes")
+    public static ResolveDataChangeEventsTask create(final DataTreeCandidate candidate,
+            final ListenerTree listenerTree,
+            final NotificationManager<AsyncDataChangeListener,AsyncDataChangeEvent> notificationMgr) {
+        return new ResolveDataChangeEventsTask(candidate, listenerTree, notificationMgr);
     }
 }
index 2a9840634381efea5c76cfd220530a11a83d80f0..44ee61c116a9a4c5f8f336890827fcfceea68075 100644 (file)
@@ -7,19 +7,19 @@
  */
 package org.opendaylight.controller.md.sal.dom.store.impl;
 
-import static com.google.common.base.Preconditions.checkNotNull;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeSnapshot;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.Futures;
+import static com.google.common.base.Preconditions.checkNotNull;
 
 /**
  *
@@ -63,4 +63,16 @@ final class SnapshotBackedReadTransaction extends AbstractDOMStoreTransaction
             return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed",e));
         }
     }
-}
\ No newline at end of file
+
+    @Override public CheckedFuture<Boolean, ReadFailedException> exists(YangInstanceIdentifier path) {
+        LOG.debug("Tx: {} Exists: {}", getIdentifier(), path);
+        checkNotNull(path, "Path must not be null.");
+
+        try {
+            return Futures.immediateCheckedFuture(
+                read(path).checkedGet().isPresent());
+        } catch (ReadFailedException e) {
+            return Futures.immediateFailedCheckedFuture(e);
+        }
+    }
+}
index 5c5e9c6b6d82f4263959c6f8e137cc0bf1af25ad..ce7043fd4747542a98802a893d0d49c3c12b3de5 100644 (file)
@@ -61,4 +61,14 @@ class SnapshotBackedReadWriteTransaction extends SnapshotBackedWriteTransaction
             return Futures.immediateFailedCheckedFuture(new ReadFailedException("Read failed",e));
         }
     }
-}
\ No newline at end of file
+
+    @Override public CheckedFuture<Boolean, ReadFailedException> exists(
+        YangInstanceIdentifier path) {
+        try {
+            return Futures.immediateCheckedFuture(
+                read(path).checkedGet().isPresent());
+        } catch (ReadFailedException e) {
+            return Futures.immediateFailedCheckedFuture(e);
+        }
+    }
+}
index 39152767dd0b063c373adaed9fd5704b8702719c..ac7a31818720dcc494bfa2df714ea862bed2ba45 100644 (file)
@@ -144,7 +144,10 @@ public final class ListenerTree  {
 
     /**
      * A walking context, pretty much equivalent to an iterator, but it
-     * exposes the undelying tree structure.
+     * exposes the underlying tree structure.
+     */
+    /*
+     * FIXME: BUG-1511: split this class out as ListenerWalker.
      */
     public static final class Walker implements AutoCloseable {
         private final Lock lock;
@@ -177,6 +180,9 @@ public final class ListenerTree  {
      * only as long as the {@link org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree.Walker} instance through which it is reached remains
      * unclosed.
      */
+    /*
+     * FIXME: BUG-1511: split this class out as ListenerNode.
+     */
     public static final class Node implements StoreTreeNode<Node>, Identifiable<PathArgument> {
         private final Collection<DataChangeListenerRegistration<?>> listeners = new ArrayList<>();
         private final Map<PathArgument, Node> children = new HashMap<>();
index 3176ca764de198dac326dbfb9db51f6f2190ded8..76a9354d1aea79cde305b087a830687b75d91889 100644 (file)
@@ -9,7 +9,7 @@ package org.opendaylight.controller.md.sal.dom.store.impl;
 
 import java.util.Collection;
 import java.util.Map;
-
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.opendaylight.controller.md.sal.dom.store.impl.DatastoreTestTask.WriteTransactionCustomizer;
@@ -18,6 +18,7 @@ import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controll
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.top.level.list.NestedList;
 import org.opendaylight.yangtools.sal.binding.generator.impl.ModuleInfoBackedContext;
+import org.opendaylight.yangtools.util.concurrent.SpecialExecutors;
 import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
 import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
 import org.opendaylight.yangtools.yang.common.QName;
@@ -48,6 +49,7 @@ public abstract class AbstractDataChangeListenerTest {
 
     private InMemoryDOMDataStore datastore;
     private SchemaContext schemaContext;
+    private TestDCLExecutorService dclExecutorService;
 
     @Before
     public final void setup() throws Exception {
@@ -56,13 +58,24 @@ public abstract class AbstractDataChangeListenerTest {
         ModuleInfoBackedContext context = ModuleInfoBackedContext.create();
         context.registerModuleInfo(moduleInfo);
         schemaContext = context.tryToCreateSchemaContext().get();
+
+        dclExecutorService = new TestDCLExecutorService(
+                SpecialExecutors.newBlockingBoundedFastThreadPool(1, 10, "DCL" ));
+
         datastore = new InMemoryDOMDataStore("TEST",
-                MoreExecutors.sameThreadExecutor());
+                MoreExecutors.sameThreadExecutor(), dclExecutorService );
         datastore.onGlobalContextUpdated(schemaContext);
     }
 
+    @After
+    public void tearDown() {
+        if( dclExecutorService != null ) {
+            dclExecutorService.shutdownNow();
+        }
+    }
+
     public final DatastoreTestTask newTestTask() {
-        return new DatastoreTestTask(datastore).cleanup(DatastoreTestTask
+        return new DatastoreTestTask(datastore, dclExecutorService).cleanup(DatastoreTestTask
                 .simpleDelete(TOP_LEVEL));
     }
 
index 26987a6fba6426169ee7df98340c8ac0f0f4be47..98d79bee8bffaf40adacec43c95a665c5300b495 100644 (file)
@@ -8,9 +8,11 @@
 package org.opendaylight.controller.md.sal.dom.store.impl;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
@@ -37,11 +39,13 @@ public class DatastoreTestTask {
     private WriteTransactionCustomizer cleanup;
     private YangInstanceIdentifier changePath;
     private DataChangeScope changeScope;
-    private boolean postSetup = false;
+    private volatile boolean postSetup = false;
     private final ChangeEventListener internalListener;
+    private final TestDCLExecutorService dclExecutorService;
 
-    public DatastoreTestTask(final DOMStore datastore) {
+    public DatastoreTestTask(final DOMStore datastore, final TestDCLExecutorService dclExecutorService) {
         this.store = datastore;
+        this.dclExecutorService = dclExecutorService;
         internalListener = new ChangeEventListener();
     }
 
@@ -79,7 +83,7 @@ public class DatastoreTestTask {
         return this;
     }
 
-    public void run() throws InterruptedException, ExecutionException {
+    public void run() throws InterruptedException, ExecutionException, TimeoutException {
         if (setup != null) {
             execute(setup);
         }
@@ -89,13 +93,17 @@ public class DatastoreTestTask {
         }
 
         Preconditions.checkState(write != null, "Write Transaction must be set.");
+
         postSetup = true;
+        dclExecutorService.afterTestSetup();
+
         execute(write);
         if (registration != null) {
             registration.close();
         }
+
         if (changeListener != null) {
-            changeListener.onDataChanged(internalListener.receivedChange.get());
+            changeListener.onDataChanged(getChangeEvent());
         }
         if (read != null) {
             read.verify(store.newReadOnlyTransaction());
@@ -105,8 +113,26 @@ public class DatastoreTestTask {
         }
     }
 
-    public Future<AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>>> getChangeEvent() {
-        return internalListener.receivedChange;
+    public AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> getChangeEvent() {
+        try {
+            return internalListener.receivedChange.get(10, TimeUnit.SECONDS);
+        } catch( Exception e ) {
+            fail( "Error getting the AsyncDataChangeEvent from the Future: " + e );
+        }
+
+        // won't get here
+        return null;
+    }
+
+    public void verifyNoChangeEvent() {
+        try {
+            Object unexpected = internalListener.receivedChange.get(500, TimeUnit.MILLISECONDS);
+            fail( "Got unexpected AsyncDataChangeEvent from the Future: " + unexpected );
+        } catch( TimeoutException e ) {
+            // Expected
+        } catch( Exception e ) {
+            fail( "Error getting the AsyncDataChangeEvent from the Future: " + e );
+        }
     }
 
     private void execute(final WriteTransactionCustomizer writeCustomizer) throws InterruptedException,
index 54d2043dc76a6df85cafdfe468f1bf7ff5b989f3..84337de419b2d24f82425fb889b89786e83027ce 100644 (file)
@@ -20,7 +20,7 @@ public abstract class DefaultDataChangeListenerTestSuite extends AbstractDataCha
     abstract protected void customizeTask(DatastoreTestTask task);
 
     @Test
-    public final void putTopLevelOneNested() throws InterruptedException, ExecutionException {
+    public final void putTopLevelOneNested() throws Exception {
 
         DatastoreTestTask task = newTestTask().test(writeOneTopMultipleNested(FOO, BAR));
         customizeTask(task);
@@ -29,7 +29,7 @@ public abstract class DefaultDataChangeListenerTestSuite extends AbstractDataCha
     }
 
     @Test
-    public final void existingTopWriteSibling() throws InterruptedException, ExecutionException {
+    public final void existingTopWriteSibling() throws Exception {
         DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO)).test(
                 new WriteTransactionCustomizer() {
                     @Override
@@ -46,7 +46,7 @@ public abstract class DefaultDataChangeListenerTestSuite extends AbstractDataCha
 
 
     @Test
-    public final void existingTopWriteTwoNested() throws InterruptedException, ExecutionException {
+    public final void existingTopWriteTwoNested() throws Exception {
         DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO)).test(
                 new WriteTransactionCustomizer() {
                     @Override
@@ -64,7 +64,7 @@ public abstract class DefaultDataChangeListenerTestSuite extends AbstractDataCha
 
 
     @Test
-    public final void existingOneNestedWriteAdditionalNested() throws InterruptedException, ExecutionException {
+    public final void existingOneNestedWriteAdditionalNested() throws Exception {
         DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO, BAR)).test(
                 new WriteTransactionCustomizer() {
                     @Override
@@ -79,11 +79,10 @@ public abstract class DefaultDataChangeListenerTestSuite extends AbstractDataCha
 
     protected abstract void existingOneNestedWriteAdditionalNested(DatastoreTestTask task) throws InterruptedException, ExecutionException;
 
-    protected abstract void putTopLevelOneNested(DatastoreTestTask task) throws InterruptedException,
-            ExecutionException;
+    protected abstract void putTopLevelOneNested(DatastoreTestTask task) throws Exception;
 
     @Test
-    public final void replaceTopLevelNestedChanged() throws InterruptedException, ExecutionException {
+    public final void replaceTopLevelNestedChanged() throws Exception {
         DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO, BAR)).test(
                 writeOneTopMultipleNested(FOO, BAZ));
         customizeTask(task);
@@ -95,7 +94,7 @@ public abstract class DefaultDataChangeListenerTestSuite extends AbstractDataCha
             ExecutionException;
 
     @Test
-    public final void putTopLevelWithTwoNested() throws InterruptedException, ExecutionException {
+    public final void putTopLevelWithTwoNested() throws Exception {
 
         DatastoreTestTask task = newTestTask().test(writeOneTopMultipleNested(FOO, BAR, BAZ));
         customizeTask(task);
@@ -107,7 +106,7 @@ public abstract class DefaultDataChangeListenerTestSuite extends AbstractDataCha
             ExecutionException;
 
     @Test
-    public final void twoNestedExistsOneIsDeleted() throws InterruptedException, ExecutionException {
+    public final void twoNestedExistsOneIsDeleted() throws Exception {
 
         DatastoreTestTask task = newTestTask().setup(writeOneTopMultipleNested(FOO, BAR, BAZ)).test(
                 deleteNested(FOO, BAZ));
@@ -120,7 +119,7 @@ public abstract class DefaultDataChangeListenerTestSuite extends AbstractDataCha
             ExecutionException;
 
     @Test
-    public final void nestedListExistsRootDeleted() throws InterruptedException, ExecutionException {
+    public final void nestedListExistsRootDeleted() throws Exception {
 
         DatastoreTestTask task = newTestTask().cleanup(null).setup(writeOneTopMultipleNested(FOO, BAR, BAZ))
                 .test(DatastoreTestTask.simpleDelete(TOP_LEVEL));
index 9b105aa3064121b518295220a5fb3351d030c13b..c609e13e791c375979ad99bce62ed27df9d321f8 100644 (file)
@@ -7,13 +7,10 @@
  */
 package org.opendaylight.controller.md.sal.dom.store.impl;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.util.concurrent.ExecutionException;
-
+import com.google.common.base.Optional;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.MoreExecutors;
 import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -35,9 +32,12 @@ import org.opendaylight.yangtools.yang.data.impl.schema.ImmutableNodes;
 import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
-import com.google.common.base.Optional;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.MoreExecutors;
+import java.util.concurrent.ExecutionException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 
 public class InMemoryDataStoreTest {
@@ -47,7 +47,8 @@ public class InMemoryDataStoreTest {
 
     @Before
     public void setupStore() {
-        domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.sameThreadExecutor());
+        domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.sameThreadExecutor(),
+                MoreExecutors.sameThreadExecutor());
         schemaContext = TestModel.createTestContext();
         domStore.onGlobalContextUpdated(schemaContext);
     }
@@ -184,6 +185,74 @@ public class InMemoryDataStoreTest {
         assertEquals( "After commit read: data", containerNode, afterCommitRead.get() );
     }
 
+
+    @Test
+    public void testExistsForExistingData() throws Exception {
+
+        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+        assertNotNull( writeTx );
+
+        ContainerNode containerNode = ImmutableContainerNodeBuilder.create()
+            .withNodeIdentifier( new NodeIdentifier( TestModel.TEST_QNAME ) )
+            .addChild( ImmutableNodes.mapNodeBuilder( TestModel.OUTER_LIST_QNAME )
+                .addChild( ImmutableNodes.mapEntry( TestModel.OUTER_LIST_QNAME,
+                    TestModel.ID_QNAME, 1 ) ).build() ).build();
+
+        writeTx.merge( TestModel.TEST_PATH, containerNode );
+
+        CheckedFuture<Boolean, ReadFailedException> exists =
+            writeTx.exists(TestModel.TEST_PATH);
+
+        assertEquals(true, exists.checkedGet());
+
+        DOMStoreThreePhaseCommitCohort ready = writeTx.ready();
+
+        ready.preCommit().get();
+
+        ready.commit().get();
+
+        DOMStoreReadTransaction readTx = domStore.newReadOnlyTransaction();
+        assertNotNull( readTx );
+
+        exists =
+            readTx.exists(TestModel.TEST_PATH);
+
+        assertEquals(true, exists.checkedGet());
+    }
+
+    @Test
+    public void testExistsForNonExistingData() throws Exception {
+
+        DOMStoreReadWriteTransaction writeTx = domStore.newReadWriteTransaction();
+        assertNotNull( writeTx );
+
+        CheckedFuture<Boolean, ReadFailedException> exists =
+            writeTx.exists(TestModel.TEST_PATH);
+
+        assertEquals(false, exists.checkedGet());
+
+        DOMStoreReadTransaction readTx = domStore.newReadOnlyTransaction();
+        assertNotNull( readTx );
+
+        exists =
+            readTx.exists(TestModel.TEST_PATH);
+
+        assertEquals(false, exists.checkedGet());
+    }
+
+    @Test(expected=ReadFailedException.class)
+    public void testExistsThrowsReadFailedException() throws Exception {
+
+        DOMStoreReadTransaction readTx = domStore.newReadOnlyTransaction();
+        assertNotNull( readTx );
+
+        readTx.close();
+
+        readTx.exists(TestModel.TEST_PATH).checkedGet();
+    }
+
+
+
     @Test(expected=ReadFailedException.class)
     public void testReadWithReadOnlyTransactionClosed() throws Throwable {
 
index 905dc0d19b8c1f3b671de983b4dbda5de89526ce..43b339e506d48670b10b896183d11db8d3c83a2f 100644 (file)
@@ -23,7 +23,7 @@ public class RootScopeSubtreeTest extends DefaultDataChangeListenerTestSuite {
 
     @Override
     public void putTopLevelOneNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertContains(change.getCreatedData(), TOP_LEVEL, path(FOO), path(FOO, BAR));
         assertEmpty(change.getUpdatedData());
@@ -34,7 +34,7 @@ public class RootScopeSubtreeTest extends DefaultDataChangeListenerTestSuite {
     public void replaceTopLevelNestedChanged(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertContains(change.getCreatedData(), path(FOO, BAZ));
         assertContains(change.getUpdatedData(), TOP_LEVEL, path(FOO));
@@ -45,7 +45,7 @@ public class RootScopeSubtreeTest extends DefaultDataChangeListenerTestSuite {
     protected void putTopLevelWithTwoNested(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertContains(change.getCreatedData(), TOP_LEVEL, path(FOO), path(FOO, BAR), path(FOO, BAZ));
         assertEmpty(change.getUpdatedData());
@@ -56,7 +56,7 @@ public class RootScopeSubtreeTest extends DefaultDataChangeListenerTestSuite {
     protected void twoNestedExistsOneIsDeleted(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertEmpty(change.getCreatedData());
         assertContains(change.getUpdatedData(), TOP_LEVEL, path(FOO));
@@ -67,7 +67,7 @@ public class RootScopeSubtreeTest extends DefaultDataChangeListenerTestSuite {
     protected void nestedListExistsRootDeleted(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertEmpty(change.getCreatedData());
         assertEmpty(change.getUpdatedData());
@@ -76,7 +76,7 @@ public class RootScopeSubtreeTest extends DefaultDataChangeListenerTestSuite {
 
     @Override
     protected void existingOneNestedWriteAdditionalNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertContains(change.getCreatedData(), path(FOO,BAZ));
         assertNotContains(change.getCreatedData(), path(FOO,BAR));
@@ -86,7 +86,7 @@ public class RootScopeSubtreeTest extends DefaultDataChangeListenerTestSuite {
 
     @Override
     protected void existingTopWriteTwoNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertContains(change.getCreatedData(), path(FOO,BAR),path(FOO,BAZ));
         assertContains(change.getUpdatedData(), TOP_LEVEL, path(FOO));
@@ -96,7 +96,7 @@ public class RootScopeSubtreeTest extends DefaultDataChangeListenerTestSuite {
 
     @Override
     protected void existingTopWriteSibling(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertContains(change.getCreatedData(), path(FOO_SIBLING));
         assertContains(change.getUpdatedData(), TOP_LEVEL);
index 5cba93a712f6b313d0a40bd9ece03ec88135d567..364712c7b393ba87ee1d4900e9b3fab9fed41185 100644 (file)
@@ -34,7 +34,8 @@ public class SchemaUpdateForTransactionTest {
 
     @Before
     public void setupStore() {
-        domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.sameThreadExecutor());
+        domStore = new InMemoryDOMDataStore("TEST", MoreExecutors.sameThreadExecutor(),
+                MoreExecutors.sameThreadExecutor());
         loadSchemas(RockTheHouseInput.class);
     }
 
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/TestDCLExecutorService.java b/opendaylight/md-sal/sal-inmemory-datastore/src/test/java/org/opendaylight/controller/md/sal/dom/store/impl/TestDCLExecutorService.java
new file mode 100644 (file)
index 0000000..f6e6461
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.md.sal.dom.store.impl;
+
+import java.util.concurrent.ExecutorService;
+
+import com.google.common.util.concurrent.ForwardingExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+
+/**
+ * A forwarding ExecutorService used by unit tests for DataChangeListener notifications.
+ *
+ * @author Thomas Pantelis
+ */
+public class TestDCLExecutorService extends ForwardingExecutorService {
+
+    // Start with a same-thread executor to avoid timing issues during test setup.
+    private volatile ExecutorService currentExecutor = MoreExecutors.sameThreadExecutor();
+
+    // The real executor to use when test setup is complete.
+    private final ExecutorService postSetupExecutor;
+
+
+    public TestDCLExecutorService( ExecutorService postSetupExecutor ) {
+        this.postSetupExecutor = postSetupExecutor;
+    }
+
+    @Override
+    protected ExecutorService delegate() {
+        return currentExecutor;
+    }
+
+    public void afterTestSetup() {
+        // Test setup complete - switch to the real executor.
+        currentExecutor = postSetupExecutor;
+    }
+}
\ No newline at end of file
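
Note: the test harness drives this class roughly as follows (condensed from AbstractDataChangeListenerTest earlier in this patch):

    TestDCLExecutorService dclExecutorService = new TestDCLExecutorService(
            SpecialExecutors.newBlockingBoundedFastThreadPool(1, 10, "DCL"));
    InMemoryDOMDataStore datastore = new InMemoryDOMDataStore("TEST",
            MoreExecutors.sameThreadExecutor(), dclExecutorService);
    // Setup (schema registration, initial writes) runs on the same-thread delegate,
    // then the test switches to the real bounded pool:
    dclExecutorService.afterTestSetup();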
index 7c8676eff56728f9b5307e177f9ffcbd0c4bb87f..cdf465aacee9da2226d6a2f3b9721b6f8770306e 100644 (file)
@@ -11,8 +11,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
@@ -32,7 +30,7 @@ public class WildcardedScopeBaseTest extends DefaultDataChangeListenerTestSuite
     @Override
     public void putTopLevelOneNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertNotNull(change);
 
@@ -48,7 +46,7 @@ public class WildcardedScopeBaseTest extends DefaultDataChangeListenerTestSuite
     public void replaceTopLevelNestedChanged(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
         assertNotNull(change);
 
         assertContains(change.getCreatedData(), path(FOO, BAZ));
@@ -62,7 +60,7 @@ public class WildcardedScopeBaseTest extends DefaultDataChangeListenerTestSuite
     protected void putTopLevelWithTwoNested(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
         assertNotNull(change);
         assertFalse(change.getCreatedData().isEmpty());
 
@@ -77,7 +75,6 @@ public class WildcardedScopeBaseTest extends DefaultDataChangeListenerTestSuite
     protected void twoNestedExistsOneIsDeleted(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        Future<?> future = task.getChangeEvent();
         /*
          * Base listener should be notified only and only if actual node changed its state,
          * since deletion of child, did not result in change of node we are listening
@@ -85,14 +82,14 @@ public class WildcardedScopeBaseTest extends DefaultDataChangeListenerTestSuite
          * and this means settable future containing receivedDataChangeEvent is not done.
          *
          */
-        assertFalse(future.isDone());
+        task.verifyNoChangeEvent();
     }
 
     @Override
     public void nestedListExistsRootDeleted(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertEmpty(change.getCreatedData());
         assertEmpty(change.getUpdatedData());
@@ -103,7 +100,6 @@ public class WildcardedScopeBaseTest extends DefaultDataChangeListenerTestSuite
 
     @Override
     protected void existingOneNestedWriteAdditionalNested(final DatastoreTestTask task) {
-        Future<?> future = task.getChangeEvent();
         /*
          * One listener should be notified only and only if actual node changed its state,
          * since deletion of nested child (in this case /nested-list/nested-list[foo],
@@ -112,12 +108,11 @@ public class WildcardedScopeBaseTest extends DefaultDataChangeListenerTestSuite
          * and this means settable future containing receivedDataChangeEvent is not done.
          *
          */
-        assertFalse(future.isDone());
+        task.verifyNoChangeEvent();
     }
 
     @Override
     protected void existingTopWriteTwoNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
-        Future<?> future = task.getChangeEvent();
         /*
          * One listener should be notified if and only if the actual node changed its state,
          * since deletion of a nested child (in this case /nested-list/nested-list[foo],
@@ -126,12 +121,12 @@ public class WildcardedScopeBaseTest extends DefaultDataChangeListenerTestSuite
          * and this means settable future containing receivedDataChangeEvent is not done.
          *
          */
-        assertFalse(future.isDone());
+        task.verifyNoChangeEvent();
     }
 
     @Override
     protected void existingTopWriteSibling(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertContains(change.getCreatedData(), path(FOO_SIBLING));
         assertNotContains(change.getUpdatedData(), path(FOO), TOP_LEVEL);
index ac18d5c976d2ae0e9691d52eeac14fa7aef8da91..3407e0ffa4c6511a55b7d11c5bdd4ca9a0a718f2 100644 (file)
@@ -11,8 +11,6 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
@@ -32,7 +30,7 @@ public class WildcardedScopeOneTest extends DefaultDataChangeListenerTestSuite {
     @Override
     public void putTopLevelOneNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertNotNull(change);
 
@@ -48,7 +46,7 @@ public class WildcardedScopeOneTest extends DefaultDataChangeListenerTestSuite {
     public void replaceTopLevelNestedChanged(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
         assertNotNull(change);
 
         assertContains(change.getCreatedData(), path(FOO, BAZ));
@@ -62,7 +60,7 @@ public class WildcardedScopeOneTest extends DefaultDataChangeListenerTestSuite {
     protected void putTopLevelWithTwoNested(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
         assertNotNull(change);
         assertFalse(change.getCreatedData().isEmpty());
 
@@ -77,7 +75,6 @@ public class WildcardedScopeOneTest extends DefaultDataChangeListenerTestSuite {
     protected void twoNestedExistsOneIsDeleted(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        Future<?> future = task.getChangeEvent();
         /*
          * One listener should be notified if and only if the actual node changed its state,
          * since deletion of a nested child (in this case /nested-list/nested-list[foo],
@@ -86,14 +83,14 @@ public class WildcardedScopeOneTest extends DefaultDataChangeListenerTestSuite {
          * and this means settable future containing receivedDataChangeEvent is not done.
          *
          */
-        assertFalse(future.isDone());
+        task.verifyNoChangeEvent();
     }
 
     @Override
     public void nestedListExistsRootDeleted(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertEmpty(change.getCreatedData());
         assertEmpty(change.getUpdatedData());
@@ -104,7 +101,6 @@ public class WildcardedScopeOneTest extends DefaultDataChangeListenerTestSuite {
 
     @Override
     protected void existingOneNestedWriteAdditionalNested(final DatastoreTestTask task) {
-        Future<?> future = task.getChangeEvent();
         /*
          * One listener should be notified if and only if the actual node changed its state,
          * since deletion of a nested child (in this case /nested-list/nested-list[foo],
@@ -113,12 +109,11 @@ public class WildcardedScopeOneTest extends DefaultDataChangeListenerTestSuite {
          * and this means settable future containing receivedDataChangeEvent is not done.
          *
          */
-        assertFalse(future.isDone());
+        task.verifyNoChangeEvent();
     }
 
     @Override
     protected void existingTopWriteTwoNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
-        Future<?> future = task.getChangeEvent();
         /*
          * One listener should be notified if and only if the actual node changed its state,
          * since deletion of a nested child (in this case /nested-list/nested-list[foo],
@@ -127,12 +122,12 @@ public class WildcardedScopeOneTest extends DefaultDataChangeListenerTestSuite {
          * and this means settable future containing receivedDataChangeEvent is not done.
          *
          */
-        assertFalse(future.isDone());
+        task.verifyNoChangeEvent();
     }
 
     @Override
     protected void existingTopWriteSibling(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertContains(change.getCreatedData(), path(FOO_SIBLING));
         assertNotContains(change.getUpdatedData(),path(FOO), TOP_LEVEL);
index 7e67242dd3f941490d54f53bcb96d5987161d01f..a7fa24f2934a8da5dc879236147f6fd35df14cba 100644 (file)
@@ -32,7 +32,7 @@ public class WildcardedScopeSubtreeTest extends DefaultDataChangeListenerTestSui
     @Override
     public void putTopLevelOneNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertNotContains(change.getCreatedData(), TOP_LEVEL);
         assertContains(change.getCreatedData(), path(FOO), path(FOO, BAR));
@@ -45,7 +45,7 @@ public class WildcardedScopeSubtreeTest extends DefaultDataChangeListenerTestSui
     public void replaceTopLevelNestedChanged(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
         assertNotNull(change);
 
         assertContains(change.getCreatedData(), path(FOO, BAZ));
@@ -59,7 +59,7 @@ public class WildcardedScopeSubtreeTest extends DefaultDataChangeListenerTestSui
     protected void putTopLevelWithTwoNested(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
         assertNotNull(change);
         assertFalse(change.getCreatedData().isEmpty());
 
@@ -74,7 +74,7 @@ public class WildcardedScopeSubtreeTest extends DefaultDataChangeListenerTestSui
     protected void twoNestedExistsOneIsDeleted(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
         assertNotNull(change);
         assertTrue(change.getCreatedData().isEmpty());
         assertContains(change.getUpdatedData(), path(FOO));
@@ -86,7 +86,7 @@ public class WildcardedScopeSubtreeTest extends DefaultDataChangeListenerTestSui
     public void nestedListExistsRootDeleted(final DatastoreTestTask task) throws InterruptedException,
             ExecutionException {
 
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertEmpty(change.getCreatedData());
         assertEmpty(change.getUpdatedData());
@@ -97,7 +97,7 @@ public class WildcardedScopeSubtreeTest extends DefaultDataChangeListenerTestSui
 
     @Override
     protected void existingOneNestedWriteAdditionalNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertContains(change.getCreatedData(), path(FOO,BAZ));
         assertNotContains(change.getCreatedData(), path(FOO,BAR));
@@ -108,7 +108,7 @@ public class WildcardedScopeSubtreeTest extends DefaultDataChangeListenerTestSui
 
     @Override
     protected void existingTopWriteTwoNested(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertContains(change.getCreatedData(), path(FOO,BAR),path(FOO,BAZ));
         assertContains(change.getUpdatedData(), path(FOO));
@@ -118,7 +118,7 @@ public class WildcardedScopeSubtreeTest extends DefaultDataChangeListenerTestSui
 
     @Override
     protected void existingTopWriteSibling(final DatastoreTestTask task) throws InterruptedException, ExecutionException {
-        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent().get();
+        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> change = task.getChangeEvent();
 
         assertContains(change.getCreatedData(), path(FOO_SIBLING));
         assertNotContains(change.getUpdatedData(), path(FOO), TOP_LEVEL);
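
The wildcarded-scope suites above now take the received AsyncDataChangeEvent straight from DatastoreTestTask.getChangeEvent() and, where no event must be delivered, call the new task.verifyNoChangeEvent() instead of asserting on a raw Future. A minimal sketch of what such helpers could look like, assuming the task still tracks delivery through a Guava SettableFuture; the field name, timeout and exact assertion below are illustrative and not taken from the real DatastoreTestTask:

    import static org.junit.Assert.assertFalse;

    import com.google.common.util.concurrent.SettableFuture;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeEvent;
    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
    import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;

    // Hypothetical helper shape; the real DatastoreTestTask may differ.
    final class DatastoreTestTaskSketch {
        // Completed by the registered data change listener when an event arrives.
        private final SettableFuture<AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>>> event =
                SettableFuture.create();

        AsyncDataChangeEvent<YangInstanceIdentifier, NormalizedNode<?, ?>> getChangeEvent()
                throws InterruptedException, ExecutionException {
            try {
                // Bounded wait instead of forcing every test to call get() on a bare Future.
                return event.get(5, TimeUnit.SECONDS);
            } catch (final TimeoutException e) {
                throw new AssertionError("No data change event was received", e);
            }
        }

        void verifyNoChangeEvent() {
            // The listener must not have fired for this task.
            assertFalse(event.isDone());
        }
    }
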
index b75df80f4ef4b3091975e3b5e0f50a70ed9bc71c..bca47af5c0b3deddaeb1bacd1c86346892eaeafa 100644 (file)
@@ -10,12 +10,10 @@ package org.opendaylight.controller.config.yang.md.sal.connector.netconf;
 import static org.opendaylight.controller.config.api.JmxAttributeValidationException.checkCondition;
 import static org.opendaylight.controller.config.api.JmxAttributeValidationException.checkNotNull;
 
-import java.io.File;
-import java.io.InputStream;
+import com.google.common.base.Optional;
 import java.net.InetSocketAddress;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
-
 import org.opendaylight.controller.config.api.JmxAttributeValidationException;
 import org.opendaylight.controller.netconf.client.NetconfClientDispatcher;
 import org.opendaylight.controller.netconf.client.conf.NetconfClientConfiguration;
@@ -25,9 +23,11 @@ import org.opendaylight.controller.netconf.nettyutil.handler.ssh.authentication.
 import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
 import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
 import org.opendaylight.controller.sal.connect.netconf.NetconfDevice;
+import org.opendaylight.controller.sal.connect.netconf.NetconfStateSchemas;
 import org.opendaylight.controller.sal.connect.netconf.listener.NetconfDeviceCommunicator;
 import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
 import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceSalFacade;
+import org.opendaylight.controller.sal.connect.netconf.schema.mapping.NetconfMessageTransformer;
 import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
 import org.opendaylight.controller.sal.core.api.Broker;
 import org.opendaylight.protocol.framework.ReconnectStrategy;
@@ -35,16 +35,12 @@ import org.opendaylight.protocol.framework.ReconnectStrategyFactory;
 import org.opendaylight.protocol.framework.TimedReconnectStrategy;
 import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.Host;
 import org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev100924.IpAddress;
-import org.opendaylight.yangtools.yang.model.util.repo.AbstractCachingSchemaSourceProvider;
-import org.opendaylight.yangtools.yang.model.util.repo.FilesystemSchemaCachingProvider;
-import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProvider;
-import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProviders;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceRegistry;
 import org.osgi.framework.BundleContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Optional;
-
 /**
  *
  */
@@ -52,9 +48,10 @@ public final class NetconfConnectorModule extends org.opendaylight.controller.co
 {
     private static final Logger logger = LoggerFactory.getLogger(NetconfConnectorModule.class);
 
-    private static AbstractCachingSchemaSourceProvider<String, InputStream> GLOBAL_NETCONF_SOURCE_PROVIDER = null;
     private BundleContext bundleContext;
     private Optional<NetconfSessionCapabilities> userCapabilities;
+    private SchemaSourceRegistry schemaRegistry;
+    private SchemaContextFactory schemaContextFactory;
 
     public NetconfConnectorModule(final org.opendaylight.controller.config.api.ModuleIdentifier identifier, final org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
         super(identifier, dependencyResolver);
@@ -108,8 +105,12 @@ public final class NetconfConnectorModule extends org.opendaylight.controller.co
 
         final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade
                 = new NetconfDeviceSalFacade(id, domBroker, bindingBroker, bundleContext, globalProcessingExecutor);
+
+        final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO =
+                new NetconfDevice.SchemaResourcesDTO(schemaRegistry, schemaContextFactory, new NetconfStateSchemas.NetconfStateSchemasResolverImpl());
+
         final NetconfDevice device =
-                NetconfDevice.createNetconfDevice(id, getGlobalNetconfSchemaProvider(), globalProcessingExecutor, salFacade);
+                new NetconfDevice(schemaResourcesDTO, id, salFacade, globalProcessingExecutor, new NetconfMessageTransformer());
 
         final NetconfDeviceCommunicator listener = userCapabilities.isPresent() ?
                 new NetconfDeviceCommunicator(id, device, userCapabilities.get()) : new NetconfDeviceCommunicator(id, device);
@@ -148,17 +149,6 @@ public final class NetconfConnectorModule extends org.opendaylight.controller.co
         return Optional.of(parsedOverrideCapabilities);
     }
 
-    private synchronized AbstractCachingSchemaSourceProvider<String, InputStream> getGlobalNetconfSchemaProvider() {
-        if(GLOBAL_NETCONF_SOURCE_PROVIDER == null) {
-            final String storageFile = "cache/schema";
-            //            File directory = bundleContext.getDataFile(storageFile);
-            final File directory = new File(storageFile);
-            final SchemaSourceProvider<String> defaultProvider = SchemaSourceProviders.noopProvider();
-            GLOBAL_NETCONF_SOURCE_PROVIDER = FilesystemSchemaCachingProvider.createFromStringSourceProvider(defaultProvider, directory);
-        }
-        return GLOBAL_NETCONF_SOURCE_PROVIDER;
-    }
-
     public void setBundleContext(final BundleContext bundleContext) {
         this.bundleContext = bundleContext;
     }
@@ -212,4 +202,12 @@ public final class NetconfConnectorModule extends org.opendaylight.controller.co
             return new InetSocketAddress(ip, getPort().getValue());
         }
     }
+
+    public void setSchemaRegistry(final SchemaSourceRegistry schemaRegistry) {
+        this.schemaRegistry = schemaRegistry;
+    }
+
+    public void setSchemaContextFactory(final SchemaContextFactory schemaContextFactory) {
+        this.schemaContextFactory = schemaContextFactory;
+    }
 }
index 9842139dab7cfaa311b8dc222fd1f41b8f2554c6..b6299697cc5f08d91dd6f9588f03e5af1a340e5e 100644 (file)
@@ -7,9 +7,17 @@
  */
 package org.opendaylight.controller.config.yang.md.sal.connector.netconf;
 
+import java.io.File;
+
 import org.opendaylight.controller.config.api.DependencyResolver;
 import org.opendaylight.controller.config.api.DynamicMBeanWithInstance;
 import org.opendaylight.controller.config.spi.Module;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceFilter;
+import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.util.FilesystemSchemaSourceCache;
+import org.opendaylight.yangtools.yang.parser.repo.SharedSchemaRepository;
+import org.opendaylight.yangtools.yang.parser.util.TextToASTTransformer;
 import org.osgi.framework.BundleContext;
 
 /**
@@ -18,20 +26,38 @@ import org.osgi.framework.BundleContext;
 public class NetconfConnectorModuleFactory extends
         org.opendaylight.controller.config.yang.md.sal.connector.netconf.AbstractNetconfConnectorModuleFactory {
 
+    // TODO this should be injected
+    // Netconf devices use a schema registry and context factory separate from the controller's
+    private final SharedSchemaRepository repository = new SharedSchemaRepository(NAME);
+    private final SchemaContextFactory schemaContextFactory
+            = repository.createSchemaContextFactory(SchemaSourceFilter.ALWAYS_ACCEPT);
+
+    public NetconfConnectorModuleFactory() {
+        // Start cache and Text to AST transformer
+        final FilesystemSchemaSourceCache<YangTextSchemaSource> cache = new FilesystemSchemaSourceCache<>(repository, YangTextSchemaSource.class, new File("cache/schema"));
+        repository.registerSchemaSourceListener(cache);
+        repository.registerSchemaSourceListener(TextToASTTransformer.create(repository, repository));
+    }
+
     @Override
-    public Module createModule(String instanceName, DependencyResolver dependencyResolver,
-            DynamicMBeanWithInstance old, BundleContext bundleContext) throws Exception {
-        NetconfConnectorModule module = (NetconfConnectorModule) super.createModule(instanceName, dependencyResolver,
+    public Module createModule(final String instanceName, final DependencyResolver dependencyResolver,
+            final DynamicMBeanWithInstance old, final BundleContext bundleContext) throws Exception {
+        final NetconfConnectorModule module = (NetconfConnectorModule) super.createModule(instanceName, dependencyResolver,
                 old, bundleContext);
+
         module.setBundleContext(bundleContext);
+        module.setSchemaRegistry(repository);
+        module.setSchemaContextFactory(schemaContextFactory);
         return module;
     }
 
     @Override
-    public Module createModule(String instanceName, DependencyResolver dependencyResolver, BundleContext bundleContext) {
-        NetconfConnectorModule module = (NetconfConnectorModule) super.createModule(instanceName, dependencyResolver,
+    public Module createModule(final String instanceName, final DependencyResolver dependencyResolver, final BundleContext bundleContext) {
+        final NetconfConnectorModule module = (NetconfConnectorModule) super.createModule(instanceName, dependencyResolver,
                 bundleContext);
         module.setBundleContext(bundleContext);
+        module.setSchemaRegistry(repository);
+        module.setSchemaContextFactory(schemaContextFactory);
         return module;
     }
 }
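
The factory above now owns the schema machinery that NetconfConnectorModule previously built ad hoc: a SharedSchemaRepository acting as the SchemaSourceRegistry, a filesystem cache under cache/schema, a text-to-AST transformer, and a SchemaContextFactory shared by every connector instance. A condensed, standalone sketch of that wiring, reusing only the calls visible in the diff (the class name and the resolve helper are illustrative):

    import com.google.common.util.concurrent.CheckedFuture;
    import java.io.File;
    import java.util.Collection;
    import org.opendaylight.yangtools.yang.model.api.SchemaContext;
    import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
    import org.opendaylight.yangtools.yang.model.repo.api.SchemaResolutionException;
    import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceFilter;
    import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
    import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
    import org.opendaylight.yangtools.yang.model.repo.util.FilesystemSchemaSourceCache;
    import org.opendaylight.yangtools.yang.parser.repo.SharedSchemaRepository;
    import org.opendaylight.yangtools.yang.parser.util.TextToASTTransformer;

    // Illustrative wiring of the per-factory schema infrastructure.
    final class ConnectorSchemaRepositorySketch {
        private final SharedSchemaRepository repository = new SharedSchemaRepository("netconf-connector");
        private final SchemaContextFactory contextFactory =
                repository.createSchemaContextFactory(SchemaSourceFilter.ALWAYS_ACCEPT);

        ConnectorSchemaRepositorySketch() {
            // Persist every registered YANG text source under cache/schema.
            final FilesystemSchemaSourceCache<YangTextSchemaSource> cache =
                    new FilesystemSchemaSourceCache<>(repository, YangTextSchemaSource.class, new File("cache/schema"));
            repository.registerSchemaSourceListener(cache);
            // Let the repository turn text sources into AST form, which context assembly needs.
            repository.registerSchemaSourceListener(TextToASTTransformer.create(repository, repository));
        }

        // The device later asks the shared factory for a context built from its required sources
        // (see RecursiveSchemaSetup in NetconfDevice below).
        CheckedFuture<SchemaContext, SchemaResolutionException> resolve(final Collection<SourceIdentifier> sources) {
            return contextFactory.createSchemaContext(sources);
        }
    }
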
index b2845d5533358a059bd7ecc4014ebae5717f0ee6..269c4af82fcb88c893cc6087c5ca63df3451738b 100644 (file)
@@ -9,11 +9,11 @@ package org.opendaylight.controller.sal.connect.api;
 
 import org.opendaylight.controller.sal.core.api.RpcImplementation;
 import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
 public interface RemoteDeviceHandler<PREF> extends AutoCloseable {
 
-    void onDeviceConnected(SchemaContextProvider remoteSchemaContextProvider,
+    void onDeviceConnected(SchemaContext remoteSchemaContext,
                            PREF netconfSessionPreferences, RpcImplementation deviceRpc);
 
     void onDeviceDisconnected();
index 350132cf99a5dfb05681b0732912e7a4220335d2..cc9eb5a851271c8ed221d94038b9db5e35f92059 100644 (file)
@@ -7,41 +7,47 @@
  */
 package org.opendaylight.controller.sal.connect.netconf;
 
-import java.io.InputStream;
+import com.google.common.base.Function;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ListeningExecutorService;
+import com.google.common.util.concurrent.MoreExecutors;
+import java.util.Collection;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
-
 import org.opendaylight.controller.netconf.api.NetconfMessage;
-import org.opendaylight.controller.netconf.util.xml.XmlUtil;
 import org.opendaylight.controller.sal.connect.api.MessageTransformer;
 import org.opendaylight.controller.sal.connect.api.RemoteDevice;
 import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
 import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
-import org.opendaylight.controller.sal.connect.api.SchemaContextProviderFactory;
-import org.opendaylight.controller.sal.connect.api.SchemaSourceProviderFactory;
 import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
 import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceRpc;
-import org.opendaylight.controller.sal.connect.netconf.schema.NetconfDeviceSchemaProviderFactory;
-import org.opendaylight.controller.sal.connect.netconf.schema.NetconfRemoteSchemaSourceProvider;
-import org.opendaylight.controller.sal.connect.netconf.schema.mapping.NetconfMessageTransformer;
+import org.opendaylight.controller.sal.connect.netconf.schema.NetconfRemoteSchemaYangSourceProvider;
 import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
-import org.opendaylight.controller.sal.core.api.RpcImplementation;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
-import org.opendaylight.yangtools.yang.model.util.repo.AbstractCachingSchemaSourceProvider;
-import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProvider;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.repo.api.MissingSchemaSourceException;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaResolutionException;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceRepresentation;
+import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.spi.PotentialSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceRegistration;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceRegistry;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ListeningExecutorService;
-import com.google.common.util.concurrent.MoreExecutors;
-
 /**
  *  This is a mediator between NetconfDeviceCommunicator and NetconfDeviceSalFacade
  */
@@ -49,51 +55,33 @@ public final class NetconfDevice implements RemoteDevice<NetconfSessionCapabilit
 
     private static final Logger logger = LoggerFactory.getLogger(NetconfDevice.class);
 
+    public static final Function<QName, SourceIdentifier> QNAME_TO_SOURCE_ID_FUNCTION = new Function<QName, SourceIdentifier>() {
+        @Override
+        public SourceIdentifier apply(final QName input) {
+            return new SourceIdentifier(input.getLocalName(), Optional.fromNullable(input.getFormattedRevision()));
+        }
+    };
+
     private final RemoteDeviceId id;
 
+    private final SchemaContextFactory schemaContextFactory;
     private final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade;
     private final ListeningExecutorService processingExecutor;
+    private final SchemaSourceRegistry schemaRegistry;
     private final MessageTransformer<NetconfMessage> messageTransformer;
-    private final SchemaContextProviderFactory schemaContextProviderFactory;
-    private final SchemaSourceProviderFactory<InputStream> sourceProviderFactory;
     private final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver;
     private final NotificationHandler notificationHandler;
+    private final List<SchemaSourceRegistration<? extends SchemaSourceRepresentation>> sourceRegistrations = Lists.newArrayList();
 
-    public static NetconfDevice createNetconfDevice(final RemoteDeviceId id,
-            final AbstractCachingSchemaSourceProvider<String, InputStream> schemaSourceProvider,
-            final ExecutorService executor, final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade) {
-        return createNetconfDevice(id, schemaSourceProvider, executor, salFacade, new NetconfStateSchemas.NetconfStateSchemasResolverImpl());
-    }
-
-    @VisibleForTesting
-    protected static NetconfDevice createNetconfDevice(final RemoteDeviceId id,
-            final AbstractCachingSchemaSourceProvider<String, InputStream> schemaSourceProvider,
-            final ExecutorService executor, final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade,
-            final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver) {
-
-        return new NetconfDevice(id, salFacade, executor, new NetconfMessageTransformer(),
-                new NetconfDeviceSchemaProviderFactory(id), new SchemaSourceProviderFactory<InputStream>() {
-                    @Override
-                    public SchemaSourceProvider<InputStream> createSourceProvider(final RpcImplementation deviceRpc) {
-                        return schemaSourceProvider.createInstanceFor(new NetconfRemoteSchemaSourceProvider(id,
-                                deviceRpc));
-                    }
-                }, stateSchemasResolver);
-    }
-
-    @VisibleForTesting
-    protected NetconfDevice(final RemoteDeviceId id, final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade,
-                            final ExecutorService processingExecutor, final MessageTransformer<NetconfMessage> messageTransformer,
-                            final SchemaContextProviderFactory schemaContextProviderFactory,
-                            final SchemaSourceProviderFactory<InputStream> sourceProviderFactory,
-                            final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver) {
+    public NetconfDevice(final SchemaResourcesDTO schemaResourcesDTO, final RemoteDeviceId id, final RemoteDeviceHandler<NetconfSessionCapabilities> salFacade,
+                         final ExecutorService globalProcessingExecutor, final MessageTransformer<NetconfMessage> messageTransformer) {
         this.id = id;
+        this.schemaRegistry = schemaResourcesDTO.getSchemaRegistry();
         this.messageTransformer = messageTransformer;
+        this.schemaContextFactory = schemaResourcesDTO.getSchemaContextFactory();
         this.salFacade = salFacade;
-        this.sourceProviderFactory = sourceProviderFactory;
-        this.stateSchemasResolver = stateSchemasResolver;
-        this.processingExecutor = MoreExecutors.listeningDecorator(processingExecutor);
-        this.schemaContextProviderFactory = schemaContextProviderFactory;
+        this.stateSchemasResolver = schemaResourcesDTO.getStateSchemasResolver();
+        this.processingExecutor = MoreExecutors.listeningDecorator(globalProcessingExecutor);
         this.notificationHandler = new NotificationHandler(salFacade, messageTransformer, id);
     }
 
@@ -107,60 +95,73 @@ public final class NetconfDevice implements RemoteDevice<NetconfSessionCapabilit
         // http://netty.io/wiki/thread-model.html
         logger.debug("{}: Session to remote device established with {}", id, remoteSessionCapabilities);
 
-        final ListenableFuture<?> salInitializationFuture = processingExecutor.submit(new Runnable() {
+        final NetconfDeviceRpc deviceRpc = setUpDeviceRpc(listener);
+
+        final DeviceSourcesResolver task = new DeviceSourcesResolver(deviceRpc, remoteSessionCapabilities, id, stateSchemasResolver);
+        final ListenableFuture<DeviceSources> sourceResolverFuture = processingExecutor.submit(task);
+
+        final FutureCallback<DeviceSources> resolvedSourceCallback = new FutureCallback<DeviceSources>() {
             @Override
-            public void run() {
-                final NetconfDeviceRpc deviceRpc = setUpDeviceRpc(remoteSessionCapabilities, listener);
-
-                final NetconfStateSchemas availableSchemas = stateSchemasResolver.resolve(deviceRpc, remoteSessionCapabilities, id);
-                logger.warn("{}: Schemas exposed by ietf-netconf-monitoring: {}", id, availableSchemas.getAvailableYangSchemasQNames());
-                // TODO use this for shared schema context
-
-                final SchemaSourceProvider<InputStream> delegate = sourceProviderFactory.createSourceProvider(deviceRpc);
-                final SchemaContextProvider schemaContextProvider = setUpSchemaContext(delegate, remoteSessionCapabilities);
-                updateMessageTransformer(schemaContextProvider);
-                salFacade.onDeviceConnected(schemaContextProvider, remoteSessionCapabilities, deviceRpc);
-                notificationHandler.onRemoteSchemaUp();
+            public void onSuccess(final DeviceSources result) {
+                addProvidedSourcesToSchemaRegistry(deviceRpc, result);
+                setUpSchema(result);
             }
-        });
 
-        Futures.addCallback(salInitializationFuture, new FutureCallback<Object>() {
-            @Override
-            public void onSuccess(final Object result) {
-                logger.debug("{}: Initialization in sal successful", id);
-                logger.info("{}: Netconf connector initialized successfully", id);
+            private void setUpSchema(final DeviceSources result) {
+                processingExecutor.submit(new RecursiveSchemaSetup(result, remoteSessionCapabilities, deviceRpc, listener));
             }
 
             @Override
             public void onFailure(final Throwable t) {
-                // Unable to initialize device, set as disconnected
-                logger.error("{}: Initialization failed", id, t);
-                salFacade.onDeviceDisconnected();
-                // TODO ssh connection is still open if sal initialization fails
+                logger.warn("{}: Unexpected error resolving device sources: {}", id, t);
+                handleSalInitializationFailure(t, listener);
             }
-        });
+        };
+
+        Futures.addCallback(sourceResolverFuture, resolvedSourceCallback);
+    }
+
+    private void handleSalInitializationSuccess(final SchemaContext result, final NetconfSessionCapabilities remoteSessionCapabilities, final NetconfDeviceRpc deviceRpc) {
+        updateMessageTransformer(result);
+        salFacade.onDeviceConnected(result, remoteSessionCapabilities, deviceRpc);
+        notificationHandler.onRemoteSchemaUp();
+
+        logger.debug("{}: Initialization in sal successful", id);
+        logger.info("{}: Netconf connector initialized successfully", id);
+    }
+
+    private void handleSalInitializationFailure(final Throwable t, final RemoteDeviceCommunicator<NetconfMessage> listener) {
+        logger.error("{}: Initialization in sal failed, disconnecting from device", id, t);
+        listener.close();
+        onRemoteSessionDown();
     }
 
     /**
      * Update initial message transformer to use retrieved schema
+     * @param currentSchemaContext the schema context retrieved from the remote device
      */
-    private void updateMessageTransformer(final SchemaContextProvider schemaContextProvider) {
-        messageTransformer.onGlobalContextUpdated(schemaContextProvider.getSchemaContext());
+    private void updateMessageTransformer(final SchemaContext currentSchemaContext) {
+        messageTransformer.onGlobalContextUpdated(currentSchemaContext);
     }
 
-    private SchemaContextProvider setUpSchemaContext(final SchemaSourceProvider<InputStream> sourceProvider, final NetconfSessionCapabilities capabilities) {
-        return schemaContextProviderFactory.createContextProvider(capabilities.getModuleBasedCaps(), sourceProvider);
+    private void addProvidedSourcesToSchemaRegistry(final NetconfDeviceRpc deviceRpc, final DeviceSources deviceSources) {
+        final NetconfRemoteSchemaYangSourceProvider yangProvider = new NetconfRemoteSchemaYangSourceProvider(id, deviceRpc);
+        for (final SourceIdentifier sourceId : deviceSources.getProvidedSources()) {
+            sourceRegistrations.add(schemaRegistry.registerSchemaSource(yangProvider,
+                    PotentialSchemaSource.create(sourceId, YangTextSchemaSource.class, PotentialSchemaSource.Costs.REMOTE_IO.getValue())));
+        }
     }
 
-    private NetconfDeviceRpc setUpDeviceRpc(final NetconfSessionCapabilities capHolder, final RemoteDeviceCommunicator<NetconfMessage> listener) {
-        Preconditions.checkArgument(capHolder.isMonitoringSupported(),
-                "%s: Netconf device does not support netconf monitoring, yang schemas cannot be acquired. Netconf device capabilities", capHolder);
-        return new NetconfDeviceRpc(listener, messageTransformer);
+    private NetconfDeviceRpc setUpDeviceRpc(final RemoteDeviceCommunicator<NetconfMessage> listener) {
+        return new NetconfDeviceRpc(listener, messageTransformer);
     }
 
     @Override
     public void onRemoteSessionDown() {
         salFacade.onDeviceDisconnected();
+        for (final SchemaSourceRegistration<? extends SchemaSourceRepresentation> sourceRegistration : sourceRegistrations) {
+            sourceRegistration.close();
+        }
     }
 
     @Override
@@ -169,59 +170,181 @@ public final class NetconfDevice implements RemoteDevice<NetconfSessionCapabilit
     }
 
     /**
-     * Handles incoming notifications. Either caches them(until onRemoteSchemaUp is called) or passes to sal Facade.
+     * Just a transfer object containing schema-related dependencies, injected via the constructor.
      */
-    private final static class NotificationHandler {
+    public static class SchemaResourcesDTO {
+        private final SchemaSourceRegistry schemaRegistry;
+        private final SchemaContextFactory schemaContextFactory;
+        private final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver;
+
+        public SchemaResourcesDTO(final SchemaSourceRegistry schemaRegistry, final SchemaContextFactory schemaContextFactory, final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver) {
+            this.schemaRegistry = Preconditions.checkNotNull(schemaRegistry);
+            this.schemaContextFactory = Preconditions.checkNotNull(schemaContextFactory);
+            this.stateSchemasResolver = Preconditions.checkNotNull(stateSchemasResolver);
+        }
+
+        public SchemaSourceRegistry getSchemaRegistry() {
+            return schemaRegistry;
+        }
+
+        public SchemaContextFactory getSchemaContextFactory() {
+            return schemaContextFactory;
+        }
 
-        private final RemoteDeviceHandler<?> salFacade;
-        private final List<NetconfMessage> cache = new LinkedList<>();
-        private final MessageTransformer<NetconfMessage> messageTransformer;
-        private boolean passNotifications = false;
+        public NetconfStateSchemas.NetconfStateSchemasResolver getStateSchemasResolver() {
+            return stateSchemasResolver;
+        }
+    }
+
+    /**
+     * Schema building callable.
+     */
+    private static class DeviceSourcesResolver implements Callable<DeviceSources> {
+        private final NetconfDeviceRpc deviceRpc;
+        private final NetconfSessionCapabilities remoteSessionCapabilities;
         private final RemoteDeviceId id;
+        private final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver;
 
-        NotificationHandler(final RemoteDeviceHandler<?> salFacade, final MessageTransformer<NetconfMessage> messageTransformer, final RemoteDeviceId id) {
-            this.salFacade = salFacade;
-            this.messageTransformer = messageTransformer;
+        public DeviceSourcesResolver(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id, final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver) {
+            this.deviceRpc = deviceRpc;
+            this.remoteSessionCapabilities = remoteSessionCapabilities;
             this.id = id;
+            this.stateSchemasResolver = stateSchemasResolver;
         }
 
-        synchronized void handleNotification(final NetconfMessage notification) {
-            if(passNotifications) {
-                passNotification(messageTransformer.toNotification(notification));
-            } else {
-                cacheNotification(notification);
+        @Override
+        public DeviceSources call() throws Exception {
+
+            final Set<SourceIdentifier> requiredSources = Sets.newHashSet(Collections2.transform(
+                    remoteSessionCapabilities.getModuleBasedCaps(), QNAME_TO_SOURCE_ID_FUNCTION));
+
+            // If monitoring is not supported, we still attempt to build the schema; sources might already be provided
+            final NetconfStateSchemas availableSchemas = stateSchemasResolver.resolve(deviceRpc, remoteSessionCapabilities, id);
+            logger.debug("{}: Schemas exposed by ietf-netconf-monitoring: {}", id, availableSchemas.getAvailableYangSchemasQNames());
+
+            final Set<SourceIdentifier> providedSources = Sets.newHashSet(Collections2.transform(
+                    availableSchemas.getAvailableYangSchemasQNames(), QNAME_TO_SOURCE_ID_FUNCTION));
+
+            final Set<SourceIdentifier> requiredSourcesNotProvided = Sets.difference(requiredSources, providedSources);
+
+            if (!requiredSourcesNotProvided.isEmpty()) {
+                logger.warn("{}: Netconf device does not provide all yang models reported in hello message capabilities, required but not provided: {}",
+                        id, requiredSourcesNotProvided);
+                logger.warn("{}: Attempting to build schema context from required sources", id);
             }
-        }
 
-        /**
-         * Forward all cached notifications and pass all notifications from this point directly to sal facade.
-         */
-        synchronized void onRemoteSchemaUp() {
-            passNotifications = true;
 
-            for (final NetconfMessage cachedNotification : cache) {
-                passNotification(messageTransformer.toNotification(cachedNotification));
+            // TODO should we perform this? We already have a mechanism to fix initialization of devices that do not report all required modules in hello:
+            // overriding capabilities in the configuration using the yang-module-capabilities attribute.
+            // Automatically adding them is more user friendly, even though it clashes with the yang-module-capabilities attribute.
+            // Some devices do not report all required models in hello message, but provide them
+            final Set<SourceIdentifier> providedSourcesNotRequired = Sets.difference(providedSources, requiredSources);
+            if (!providedSourcesNotRequired.isEmpty()) {
+                logger.warn("{}: Netconf device provides additional yang models not reported in hello message capabilities: {}",
+                        id, providedSourcesNotRequired);
+                logger.warn("{}: Adding provided but not required sources as required to prevent failures", id);
+                requiredSources.addAll(providedSourcesNotRequired);
             }
 
-            cache.clear();
+            return new DeviceSources(requiredSources, providedSources);
         }
+    }
 
-        private void cacheNotification(final NetconfMessage notification) {
-            Preconditions.checkState(passNotifications == false);
+    /**
+     * Contains the required sources (from hello-message capabilities) and the provided sources (from ietf-netconf-monitoring).
+     *
+     */
+    private static final class DeviceSources {
+        private final Collection<SourceIdentifier> requiredSources;
+        private final Collection<SourceIdentifier> providedSources;
 
-            logger.debug("{}: Caching notification {}, remote schema not yet fully built", id, notification);
-            if(logger.isTraceEnabled()) {
-                logger.trace("{}: Caching notification {}", id, XmlUtil.toString(notification.getDocument()));
-            }
+        public DeviceSources(final Collection<SourceIdentifier> requiredSources, final Collection<SourceIdentifier> providedSources) {
+            this.requiredSources = requiredSources;
+            this.providedSources = providedSources;
+        }
 
-            cache.add(notification);
+        public Collection<SourceIdentifier> getRequiredSources() {
+            return requiredSources;
         }
 
-        private void passNotification(final CompositeNode parsedNotification) {
-            logger.debug("{}: Forwarding notification {}", id, parsedNotification);
-            Preconditions.checkNotNull(parsedNotification);
-            salFacade.onNotification(parsedNotification);
+        public Collection<SourceIdentifier> getProvidedSources() {
+            return providedSources;
         }
+
     }
 
+    /**
+     * Schema builder that tries to build the schema context from the given sources, falling back to the largest resolvable subset.
+     */
+    private final class RecursiveSchemaSetup implements Runnable {
+        private final DeviceSources deviceSources;
+        private final NetconfSessionCapabilities remoteSessionCapabilities;
+        private final NetconfDeviceRpc deviceRpc;
+        private final RemoteDeviceCommunicator<NetconfMessage> listener;
+
+        public RecursiveSchemaSetup(final DeviceSources deviceSources, final NetconfSessionCapabilities remoteSessionCapabilities, final NetconfDeviceRpc deviceRpc, final RemoteDeviceCommunicator<NetconfMessage> listener) {
+            this.deviceSources = deviceSources;
+            this.remoteSessionCapabilities = remoteSessionCapabilities;
+            this.deviceRpc = deviceRpc;
+            this.listener = listener;
+        }
+
+        @Override
+        public void run() {
+            setUpSchema(deviceSources.getRequiredSources());
+        }
+
+        /**
+         * Recursively builds the schema context; notifies the device handler on success or on final failure.
+         */
+        private void setUpSchema(final Collection<SourceIdentifier> requiredSources) {
+            logger.trace("{}: Trying to build schema context from {}", id, requiredSources);
+
+            // If no more sources, fail
+            if(requiredSources.isEmpty()) {
+                handleSalInitializationFailure(new IllegalStateException(id + ": No more sources for schema context"), listener);
+                return;
+            }
+
+            final CheckedFuture<SchemaContext, SchemaResolutionException> schemaBuilderFuture = schemaContextFactory.createSchemaContext(requiredSources);
+
+            final FutureCallback<SchemaContext> recursiveSchemaBuilderCallback = new FutureCallback<SchemaContext>() {
+
+                @Override
+                public void onSuccess(final SchemaContext result) {
+                    logger.debug("{}: Schema context built successfully from {}", id, requiredSources);
+                    handleSalInitializationSuccess(result, remoteSessionCapabilities, deviceRpc);
+                }
+
+                @Override
+                public void onFailure(final Throwable t) {
+                    // In case source missing, try without it
+                    if (t instanceof MissingSchemaSourceException) {
+                        final SourceIdentifier missingSource = ((MissingSchemaSourceException) t).getSourceId();
+                        logger.warn("{}: Unable to build schema context, missing source {}, will reattempt without it", id, missingSource);
+                        setUpSchema(stripMissingSource(requiredSources, missingSource));
+
+                    // In case resolution error, try only with resolved sources
+                    } else if (t instanceof SchemaResolutionException) {
+                        // TODO check for infinite loop
+                        final SchemaResolutionException resolutionException = (SchemaResolutionException) t;
+                        logger.warn("{}: Unable to build schema context, unsatisfied imports {}, will reattempt with resolved only", id, resolutionException.getUnsatisfiedImports());
+                        setUpSchema(resolutionException.getResolvedSources());
+                    // unknown error, fail
+                    } else {
+                        handleSalInitializationFailure(t, listener);
+                    }
+                }
+            };
+
+            Futures.addCallback(schemaBuilderFuture, recursiveSchemaBuilderCallback);
+        }
+
+        private Collection<SourceIdentifier> stripMissingSource(final Collection<SourceIdentifier> requiredSources, final SourceIdentifier sIdToRemove) {
+            final LinkedList<SourceIdentifier> sourceIdentifiers = Lists.newLinkedList(requiredSources);
+            final boolean removed = sourceIdentifiers.remove(sIdToRemove);
+            Preconditions.checkState(removed, "%s: Trying to remove %s from %s failed", id, sIdToRemove, requiredSources);
+            return sourceIdentifiers;
+        }
+    }
 }
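
DeviceSourcesResolver above reconciles two views of the device's YANG models: the sources it must have (hello-message capabilities) and the sources it can actually serve (ietf-netconf-monitoring). A small, hypothetical walk-through of that reconciliation with invented module names; only SourceIdentifier and the Guava set operations used in the code above are assumed:

    import com.google.common.base.Optional;
    import com.google.common.collect.ImmutableSet;
    import com.google.common.collect.Sets;
    import java.util.Set;
    import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;

    // Hypothetical walk-through of DeviceSourcesResolver's required/provided reconciliation.
    final class DeviceSourcesReconciliationSketch {
        public static void main(final String[] args) {
            // From the <hello> capabilities (module-based caps mapped through QNAME_TO_SOURCE_ID_FUNCTION).
            final Set<SourceIdentifier> required = Sets.newHashSet(
                    new SourceIdentifier("example-config", Optional.of("2014-07-01")));
            // From netconf-state/schemas exposed by ietf-netconf-monitoring.
            final Set<SourceIdentifier> provided = Sets.newHashSet(
                    new SourceIdentifier("example-config", Optional.of("2014-07-01")),
                    new SourceIdentifier("example-extras", Optional.<String>absent()));

            // Announced but not downloadable: only a warning, the build is still attempted.
            final Set<SourceIdentifier> requiredNotProvided = ImmutableSet.copyOf(Sets.difference(required, provided));
            // Downloadable but not announced: added to the required set so imports can be satisfied.
            final Set<SourceIdentifier> providedNotRequired = ImmutableSet.copyOf(Sets.difference(provided, required));
            required.addAll(providedNotRequired);

            // Schema assembly will be attempted from example-config and example-extras.
            System.out.println("missing=" + requiredNotProvided + ", attempting=" + required);
        }
    }
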
index b5400347e7961c1a7c9b4bd621a47b6e7ff495e7..77e342641e060b67892f47f3083dbc4bf874ae66 100644 (file)
@@ -93,7 +93,7 @@ public final class NetconfStateSchemas {
      */
     private static NetconfStateSchemas create(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id) {
         if(remoteSessionCapabilities.isMonitoringSupported() == false) {
-            logger.warn("{}: Netconf monitoring not supported on device, cannot detect available schemas");
+            logger.warn("{}: Netconf monitoring not supported on device, cannot detect provided schemas");
             return EMPTY;
         }
 
diff --git a/opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/NotificationHandler.java b/opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/NotificationHandler.java
new file mode 100644 (file)
index 0000000..cc8960f
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.connect.netconf;
+
+import com.google.common.base.Preconditions;
+import java.util.LinkedList;
+import java.util.List;
+import org.opendaylight.controller.netconf.api.NetconfMessage;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.connect.api.MessageTransformer;
+import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Handles incoming notifications. Either caches them (until onRemoteSchemaUp is called) or passes them to the sal facade.
+ */
+final class NotificationHandler {
+
+    private static final Logger logger = LoggerFactory.getLogger(NotificationHandler.class);
+
+    private final RemoteDeviceHandler<?> salFacade;
+    private final List<NetconfMessage> queue = new LinkedList<>();
+    private final MessageTransformer<NetconfMessage> messageTransformer;
+    private final RemoteDeviceId id;
+    private boolean passNotifications = false;
+
+    NotificationHandler(final RemoteDeviceHandler<?> salFacade, final MessageTransformer<NetconfMessage> messageTransformer, final RemoteDeviceId id) {
+        this.salFacade = Preconditions.checkNotNull(salFacade);
+        this.messageTransformer = Preconditions.checkNotNull(messageTransformer);
+        this.id = Preconditions.checkNotNull(id);
+    }
+
+    synchronized void handleNotification(final NetconfMessage notification) {
+        if(passNotifications) {
+            passNotification(messageTransformer.toNotification(notification));
+        } else {
+            queueNotification(notification);
+        }
+    }
+
+    /**
+     * Forward all cached notifications and pass all notifications from this point directly to sal facade.
+     */
+    synchronized void onRemoteSchemaUp() {
+        passNotifications = true;
+
+        for (final NetconfMessage cachedNotification : queue) {
+            passNotification(messageTransformer.toNotification(cachedNotification));
+        }
+
+        queue.clear();
+    }
+
+    private void queueNotification(final NetconfMessage notification) {
+        Preconditions.checkState(passNotifications == false);
+
+        logger.debug("{}: Caching notification {}, remote schema not yet fully built", id, notification);
+        if(logger.isTraceEnabled()) {
+            logger.trace("{}: Caching notification {}", id, XmlUtil.toString(notification.getDocument()));
+        }
+
+        queue.add(notification);
+    }
+
+    private void passNotification(final CompositeNode parsedNotification) {
+        logger.debug("{}: Forwarding notification {}", id, parsedNotification);
+        Preconditions.checkNotNull(parsedNotification);
+        salFacade.onNotification(parsedNotification);
+    }
+}
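
NotificationHandler guarantees that notifications arriving before the schema context is ready are not lost: they are queued and replayed through the transformer once onRemoteSchemaUp() flips the handler into pass-through mode. A hypothetical, Mockito-based sketch of that contract; it assumes a RemoteDeviceId string constructor and would have to live in the same package, since the class is package-private:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.verify;
    import static org.mockito.Mockito.verifyZeroInteractions;
    import static org.mockito.Mockito.when;

    import org.junit.Test;
    import org.opendaylight.controller.netconf.api.NetconfMessage;
    import org.opendaylight.controller.sal.connect.api.MessageTransformer;
    import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
    import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
    import org.opendaylight.yangtools.yang.data.api.CompositeNode;

    // Hypothetical contract test, not part of this change set.
    public class NotificationHandlerSketchTest {

        @SuppressWarnings("unchecked")
        @Test
        public void notificationsAreQueuedUntilSchemaIsUp() {
            final RemoteDeviceHandler<?> facade = mock(RemoteDeviceHandler.class);
            final MessageTransformer<NetconfMessage> transformer = mock(MessageTransformer.class);
            final NetconfMessage notification = mock(NetconfMessage.class);
            final CompositeNode parsed = mock(CompositeNode.class);
            when(transformer.toNotification(notification)).thenReturn(parsed);

            final NotificationHandler handler =
                    new NotificationHandler(facade, transformer, new RemoteDeviceId("sketch-device"));

            handler.handleNotification(notification);
            verifyZeroInteractions(facade);   // cached only, schema context not built yet

            handler.onRemoteSchemaUp();        // queue is drained through the transformer
            verify(facade).onNotification(parsed);
        }
    }
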
index 2f24adcdbed9eee793b5833b04d108dafada12d6..aadb911f453a0613ed6b5470e5ee8cd7cba6e7cf 100644 (file)
@@ -51,8 +51,10 @@ public class NetconfDeviceCommunicator implements NetconfClientSessionListener,
     private final RemoteDeviceId id;
     private final Lock sessionLock = new ReentrantLock();
 
+    // TODO implement concurrent message limit
     private final Queue<Request> requests = new ArrayDeque<>();
     private NetconfClientSession session;
+    private Future<?> initFuture;
 
     public NetconfDeviceCommunicator(final RemoteDeviceId id, final RemoteDevice<NetconfSessionCapabilities, NetconfMessage> remoteDevice,
             final NetconfSessionCapabilities netconfSessionCapabilities) {
@@ -97,9 +99,9 @@ public class NetconfDeviceCommunicator implements NetconfClientSessionListener,
     public void initializeRemoteConnection(final NetconfClientDispatcher dispatch,
                                            final NetconfClientConfiguration config) {
         if(config instanceof NetconfReconnectingClientConfiguration) {
-            dispatch.createReconnectingClient((NetconfReconnectingClientConfiguration) config);
+            initFuture = dispatch.createReconnectingClient((NetconfReconnectingClientConfiguration) config);
         } else {
-            dispatch.createClient(config);
+            initFuture = dispatch.createClient(config);
         }
     }
 
@@ -172,7 +174,15 @@ public class NetconfDeviceCommunicator implements NetconfClientSessionListener,
 
     @Override
     public void close() {
-        tearDown( String.format( "The netconf session to %1$s has been closed", id.getName() ) );
+        // Cancel reconnect if in progress
+        if(initFuture != null) {
+            initFuture.cancel(false);
+        }
+        // Disconnect from device
+        if(session != null) {
+            session.close();
+        }
+        tearDown(id + ": Netconf session closed");
     }
 
     @Override
@@ -191,12 +201,12 @@ public class NetconfDeviceCommunicator implements NetconfClientSessionListener,
     private void processMessage(final NetconfMessage message) {
         Request request = null;
         sessionLock.lock();
+
         try {
             request = requests.peek();
-            if (request.future.isUncancellable()) {
+            if (request != null && request.future.isUncancellable()) {
                 requests.poll();
-            }
-            else {
+            } else {
                 request = null;
                 logger.warn("{}: Ignoring unsolicited message {}", id, msgToS(message));
             }
index dbef290197b91ea2e7eb55e19fbb9363cadace16..3cc513600dd709484f464fbae0530c7df67b4f29 100644 (file)
@@ -7,11 +7,12 @@
  */
 package org.opendaylight.controller.sal.connect.netconf.sal;
 
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutorService;
-
 import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
 import org.opendaylight.controller.sal.binding.api.BindingAwareBroker;
 import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
@@ -30,14 +31,10 @@ import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.CompositeNode;
 import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
 import org.osgi.framework.BundleContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-
 public final class NetconfDeviceSalFacade implements AutoCloseable, RemoteDeviceHandler<NetconfSessionCapabilities> {
 
     private static final Logger logger= LoggerFactory.getLogger(NetconfDeviceSalFacade.class);
@@ -64,11 +61,9 @@ public final class NetconfDeviceSalFacade implements AutoCloseable, RemoteDevice
     }
 
     @Override
-    public synchronized void onDeviceConnected(final SchemaContextProvider remoteSchemaContextProvider,
+    public synchronized void onDeviceConnected(final SchemaContext schemaContext,
                                                final NetconfSessionCapabilities netconfSessionPreferences, final RpcImplementation deviceRpc) {
-        final SchemaContext schemaContext = remoteSchemaContextProvider.getSchemaContext();
 
-        // TODO remove deprecated SchemaContextProvider from SchemaAwareRpcBroker
         // TODO move SchemaAwareRpcBroker from sal-broker-impl, now we have depend on the whole sal-broker-impl
         final RpcProvisionRegistry rpcRegistry = new SchemaAwareRpcBroker(id.getPath().toString(), new org.opendaylight.controller.sal.dom.broker.impl.SchemaContextProvider() {
             @Override
index 04a99511a1dfaa6154a9894e47906d859de0c199..6c46bed7626f27fd86d1f4b11c9462bbda395611 100644 (file)
@@ -7,12 +7,6 @@
  */
 package org.opendaylight.controller.sal.connect.netconf.sal.tx;
 
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.CONFIG_SOURCE_RUNNING;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_DATA_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_GET_CONFIG_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_GET_QNAME;
-import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.toFilterStructure;
-
 import com.google.common.base.Function;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
@@ -35,6 +29,14 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.concurrent.ExecutionException;
+
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.CONFIG_SOURCE_RUNNING;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_DATA_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_GET_CONFIG_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.NETCONF_GET_QNAME;
+import static org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil.toFilterStructure;
+
 
 public final class NetconfDeviceReadOnlyTx implements DOMDataReadOnlyTransaction {
 
@@ -55,7 +57,7 @@ public final class NetconfDeviceReadOnlyTx implements DOMDataReadOnlyTransaction
         final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_GET_CONFIG_QNAME,
                 NetconfMessageTransformUtil.wrap(NETCONF_GET_CONFIG_QNAME, CONFIG_SOURCE_RUNNING, toFilterStructure(path)));
 
-        ListenableFuture<Optional<NormalizedNode<?, ?>>> transformedFuture = Futures.transform(future, new Function<RpcResult<CompositeNode>, Optional<NormalizedNode<?, ?>>>() {
+        final ListenableFuture<Optional<NormalizedNode<?, ?>>> transformedFuture = Futures.transform(future, new Function<RpcResult<CompositeNode>, Optional<NormalizedNode<?, ?>>>() {
             @Override
             public Optional<NormalizedNode<?, ?>> apply(final RpcResult<CompositeNode> result) {
                 checkReadSuccess(result, path);
@@ -97,7 +99,7 @@ public final class NetconfDeviceReadOnlyTx implements DOMDataReadOnlyTransaction
             final YangInstanceIdentifier path) {
         final ListenableFuture<RpcResult<CompositeNode>> future = rpc.invokeRpc(NETCONF_GET_QNAME, NetconfMessageTransformUtil.wrap(NETCONF_GET_QNAME, toFilterStructure(path)));
 
-        ListenableFuture<Optional<NormalizedNode<?, ?>>> transformedFuture = Futures.transform(future, new Function<RpcResult<CompositeNode>, Optional<NormalizedNode<?, ?>>>() {
+        final ListenableFuture<Optional<NormalizedNode<?, ?>>> transformedFuture = Futures.transform(future, new Function<RpcResult<CompositeNode>, Optional<NormalizedNode<?, ?>>>() {
             @Override
             public Optional<NormalizedNode<?, ?>> apply(final RpcResult<CompositeNode> result) {
                 checkReadSuccess(result, path);
@@ -136,6 +138,19 @@ public final class NetconfDeviceReadOnlyTx implements DOMDataReadOnlyTransaction
         throw new IllegalArgumentException(String.format("%s, Cannot read data %s for %s datastore, unknown datastore type", id, path, store));
     }
 
+    @Override public CheckedFuture<Boolean, ReadFailedException> exists(
+        LogicalDatastoreType store,
+        YangInstanceIdentifier path) {
+        CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>
+            data = read(store, path);
+
+        try {
+            return Futures.immediateCheckedFuture(data.get().isPresent());
+        } catch (InterruptedException | ExecutionException e) {
+            return Futures.immediateFailedCheckedFuture(new ReadFailedException("Exists failed",e));
+        }
+    }
+
     static YangInstanceIdentifier toLegacyPath(final DataNormalizer normalizer, final YangInstanceIdentifier path, final RemoteDeviceId id) {
         try {
             return normalizer.toLegacy(path);
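
A minimal sketch of the exists()-via-read() pattern introduced above, with placeholder types so it compiles on its own; the blocking get() mirrors the patch, while the real CheckedFuture/ReadFailedException wiring is assumed to live in the surrounding transaction:

    import com.google.common.base.Optional;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;
    import java.util.concurrent.ExecutionException;

    // Simplified stand-in for the read-only transaction: exists() is derived from
    // read() by checking whether the returned Optional holds any data.
    final class ExistsViaRead {
        static ListenableFuture<Optional<String>> read(final String path) {
            // placeholder for the NETCONF get/get-config round trip
            return Futures.immediateFuture(Optional.of("<data/>"));
        }

        static boolean exists(final String path) throws InterruptedException, ExecutionException {
            return read(path).get().isPresent();        // blocks, as in the patch
        }

        public static void main(final String[] args) throws Exception {
            System.out.println(exists("/interfaces"));  // true
        }
    }
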
index 3d2c3b9d44b95fc90355ba7ce1085b1749520b9f..11362a2f9b84d5fe71013a505aad94dce4ed98ec 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.sal.connect.netconf.sal.tx;
 
 import com.google.common.base.Optional;
 import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 
 import org.opendaylight.controller.md.sal.common.api.TransactionStatus;
@@ -23,6 +24,8 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 
+import java.util.concurrent.ExecutionException;
+
 public class NetconfDeviceReadWriteTx implements DOMDataReadWriteTransaction {
 
     private final DOMDataReadTransaction delegateReadTx;
@@ -69,6 +72,19 @@ public class NetconfDeviceReadWriteTx implements DOMDataReadWriteTransaction {
         return delegateReadTx.read(store, path);
     }
 
+    @Override public CheckedFuture<Boolean, ReadFailedException> exists(
+        LogicalDatastoreType store,
+        YangInstanceIdentifier path) {
+        CheckedFuture<Optional<NormalizedNode<?, ?>>, ReadFailedException>
+            data = read(store, path);
+
+        try {
+            return Futures.immediateCheckedFuture(data.get().isPresent());
+        } catch (InterruptedException | ExecutionException e) {
+            return Futures.immediateFailedCheckedFuture(new ReadFailedException("Exists failed",e));
+        }
+    }
+
     @Override
     public Object getIdentifier() {
         return this;
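
The read-write transaction gains the same blocking exists(). For comparison only, a non-blocking variant is possible with the same Guava Futures.transform(future, Function) call the patch already uses elsewhere; this is a sketch of that alternative, not part of the change:

    import com.google.common.base.Function;
    import com.google.common.base.Optional;
    import com.google.common.util.concurrent.Futures;
    import com.google.common.util.concurrent.ListenableFuture;

    // Non-blocking sketch: map Optional presence to Boolean instead of calling get().
    final class AsyncExistsSketch {
        static ListenableFuture<Boolean> exists(final ListenableFuture<Optional<String>> readFuture) {
            return Futures.transform(readFuture, new Function<Optional<String>, Boolean>() {
                @Override
                public Boolean apply(final Optional<String> data) {
                    return data.isPresent();
                }
            });
        }

        public static void main(final String[] args) throws Exception {
            System.out.println(exists(Futures.immediateFuture(Optional.of("<data/>"))).get()); // true
        }
    }
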
diff --git a/opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/schema/NetconfDeviceSchemaProviderFactory.java b/opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/schema/NetconfDeviceSchemaProviderFactory.java
deleted file mode 100644 (file)
index e7d6464..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.connect.netconf.schema;
-
-import java.io.InputStream;
-import java.util.Collection;
-import java.util.List;
-import java.util.Set;
-
-import org.opendaylight.controller.sal.connect.api.SchemaContextProviderFactory;
-import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.model.api.Module;
-import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
-import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProvider;
-import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
-import org.opendaylight.yangtools.yang.parser.impl.util.YangSourceContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-
-public final class NetconfDeviceSchemaProviderFactory implements SchemaContextProviderFactory {
-
-    private static final Logger logger = LoggerFactory.getLogger(NetconfDeviceSchemaProviderFactory.class);
-
-    private final RemoteDeviceId id;
-
-    public NetconfDeviceSchemaProviderFactory(final RemoteDeviceId id) {
-        this.id = id;
-    }
-
-    @Override
-    public SchemaContextProvider createContextProvider(final Collection<QName> capabilities, final SchemaSourceProvider<InputStream> sourceProvider) {
-
-        final YangSourceContext sourceContext = YangSourceContext.createFrom(capabilities, sourceProvider);
-
-        if (sourceContext.getMissingSources().isEmpty() == false) {
-            logger.warn("{}: Sources for following models are missing {}", id, sourceContext.getMissingSources());
-        }
-
-        logger.debug("{}: Trying to create schema context from {}", id, sourceContext.getValidSources());
-        final List<InputStream> modelsToParse = YangSourceContext.getValidInputStreams(sourceContext);
-
-        Preconditions.checkState(sourceContext.getValidSources().isEmpty() == false,
-                "%s: Unable to create schema context, no sources provided by device", id);
-        try {
-            final SchemaContext schemaContext = tryToParseContext(modelsToParse);
-            logger.debug("{}: Schema context successfully created.", id);
-            return new NetconfSchemaContextProvider(schemaContext);
-        } catch (final RuntimeException e) {
-            logger.error("{}: Unable to create schema context, unexpected error", id, e);
-            throw new IllegalStateException(id + ": Unable to create schema context", e);
-        }
-    }
-
-    private static SchemaContext tryToParseContext(final List<InputStream> modelsToParse) {
-        final YangParserImpl parser = new YangParserImpl();
-        final Set<Module> models = parser.parseYangModelsFromStreams(modelsToParse);
-        return parser.resolveSchemaContext(models);
-    }
-
-    private static final class NetconfSchemaContextProvider implements SchemaContextProvider {
-        private final SchemaContext schemaContext;
-
-        public NetconfSchemaContextProvider(final SchemaContext schemaContext) {
-            this.schemaContext = schemaContext;
-        }
-
-        @Override
-        public SchemaContext getSchemaContext() {
-            return schemaContext;
-        }
-    }
-}
diff --git a/opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/schema/NetconfRemoteSchemaSourceProvider.java b/opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/schema/NetconfRemoteSchemaSourceProvider.java
deleted file mode 100644 (file)
index 44ff2ef..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.sal.connect.netconf.schema;
-
-import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
-import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
-import org.opendaylight.controller.sal.core.api.RpcImplementation;
-import org.opendaylight.yangtools.yang.common.QName;
-import org.opendaylight.yangtools.yang.common.RpcResult;
-import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.data.api.SimpleNode;
-import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
-import org.opendaylight.yangtools.yang.data.impl.util.CompositeNodeBuilder;
-import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-
-public final class NetconfRemoteSchemaSourceProvider implements SchemaSourceProvider<String> {
-
-    public static final QName GET_SCHEMA_QNAME = QName.create(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING,
-            "get-schema");
-    public static final QName GET_DATA_QNAME = QName
-            .create(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING, "data");
-
-    private static final Logger logger = LoggerFactory.getLogger(NetconfRemoteSchemaSourceProvider.class);
-
-    private final RpcImplementation rpc;
-    private final RemoteDeviceId id;
-
-    public NetconfRemoteSchemaSourceProvider(final RemoteDeviceId id, final RpcImplementation rpc) {
-        this.id = id;
-        this.rpc = Preconditions.checkNotNull(rpc);
-    }
-
-    @Override
-    public Optional<String> getSchemaSource(final String moduleName, final Optional<String> revision) {
-        final ImmutableCompositeNode getSchemaRequest = createGetSchemaRequest(moduleName, revision);
-
-        logger.trace("{}: Loading YANG schema source for {}:{}", id, moduleName, revision);
-        try {
-            final RpcResult<CompositeNode> schemaReply = rpc.invokeRpc(GET_SCHEMA_QNAME, getSchemaRequest).get();
-            if (schemaReply.isSuccessful()) {
-                final Optional<String> schemaBody = getSchemaFromRpc(id, schemaReply.getResult());
-                if (schemaBody.isPresent()) {
-                    logger.debug("{}: YANG Schema successfully retrieved for {}:{}", id, moduleName, revision);
-                    return schemaBody;
-                }
-            } else {
-                logger.warn("{}: YANG schema was not successfully retrieved for {}:{}. Errors: {}", id, moduleName,
-                        revision, schemaReply.getErrors());
-            }
-            return Optional.absent();
-        } catch (final InterruptedException e){
-            Thread.currentThread().interrupt();
-            throw new IllegalStateException(e);
-        } catch (final Exception e) {
-            logger.error("{}: YANG schema was not successfully retrieved for {}:{}", id, moduleName, revision, e);
-            throw new IllegalStateException(e);
-        }
-    }
-
-    private ImmutableCompositeNode createGetSchemaRequest(final String moduleName, final Optional<String> revision) {
-        final CompositeNodeBuilder<ImmutableCompositeNode> request = ImmutableCompositeNode.builder();
-        request.setQName(GET_SCHEMA_QNAME).addLeaf("identifier", moduleName);
-        if (revision.isPresent()) {
-            request.addLeaf("version", revision.get());
-        }
-        request.addLeaf("format", "yang");
-        return request.toInstance();
-    }
-
-    private static Optional<String> getSchemaFromRpc(final RemoteDeviceId id, final CompositeNode result) {
-        if (result == null) {
-            return Optional.absent();
-        }
-        final SimpleNode<?> simpleNode = result.getFirstSimpleByName(GET_DATA_QNAME.withoutRevision());
-
-        Preconditions.checkNotNull(simpleNode,
-                "%s Unexpected response to get-schema, expected response with one child %s, but was %s",
-                id, GET_DATA_QNAME.withoutRevision(), result);
-
-        final Object potential = simpleNode.getValue();
-        return potential instanceof String ? Optional.of((String) potential) : Optional.<String>absent();
-    }
-}
diff --git a/opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/schema/NetconfRemoteSchemaYangSourceProvider.java b/opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/schema/NetconfRemoteSchemaYangSourceProvider.java
new file mode 100644 (file)
index 0000000..dc90fd3
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.connect.netconf.schema;
+
+import com.google.common.base.Function;
+import com.google.common.base.Objects;
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.concurrent.ExecutionException;
+import org.apache.commons.io.IOUtils;
+import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
+import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.controller.sal.core.api.RpcImplementation;
+import org.opendaylight.yangtools.util.concurrent.ExceptionMapper;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.common.RpcResult;
+import org.opendaylight.yangtools.yang.data.api.CompositeNode;
+import org.opendaylight.yangtools.yang.data.api.SimpleNode;
+import org.opendaylight.yangtools.yang.data.impl.ImmutableCompositeNode;
+import org.opendaylight.yangtools.yang.data.impl.util.CompositeNodeBuilder;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaSourceException;
+import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.repo.api.YangTextSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public final class NetconfRemoteSchemaYangSourceProvider implements SchemaSourceProvider<YangTextSchemaSource> {
+
+    public static final QName GET_SCHEMA_QNAME = QName.create(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING,"get-schema");
+    public static final QName GET_DATA_QNAME = QName.create(NetconfMessageTransformUtil.IETF_NETCONF_MONITORING, "data");
+
+    private static final Logger logger = LoggerFactory.getLogger(NetconfRemoteSchemaYangSourceProvider.class);
+
+    private static final ExceptionMapper<SchemaSourceException> MAPPER = new ExceptionMapper<SchemaSourceException>(
+            "schemaDownload", SchemaSourceException.class) {
+        @Override
+        protected SchemaSourceException newWithCause(final String s, final Throwable throwable) {
+            return new SchemaSourceException(s, throwable);
+        }
+    };
+
+    private final RpcImplementation rpc;
+    private final RemoteDeviceId id;
+
+    public NetconfRemoteSchemaYangSourceProvider(final RemoteDeviceId id, final RpcImplementation rpc) {
+        this.id = id;
+        this.rpc = Preconditions.checkNotNull(rpc);
+    }
+
+    private ImmutableCompositeNode createGetSchemaRequest(final String moduleName, final Optional<String> revision) {
+        final CompositeNodeBuilder<ImmutableCompositeNode> request = ImmutableCompositeNode.builder();
+        request.setQName(GET_SCHEMA_QNAME).addLeaf("identifier", moduleName);
+        if (revision.isPresent()) {
+            request.addLeaf("version", revision.get());
+        }
+        request.addLeaf("format", "yang");
+        return request.toInstance();
+    }
+
+    private static Optional<String> getSchemaFromRpc(final RemoteDeviceId id, final CompositeNode result) {
+        if (result == null) {
+            return Optional.absent();
+        }
+        final SimpleNode<?> simpleNode = result.getFirstSimpleByName(GET_DATA_QNAME.withoutRevision());
+
+        Preconditions.checkNotNull(simpleNode,
+                "%s Unexpected response to get-schema, expected response with one child %s, but was %s", id,
+                GET_DATA_QNAME.withoutRevision(), result);
+
+        final Object potential = simpleNode.getValue();
+        return potential instanceof String ? Optional.of((String) potential) : Optional.<String> absent();
+    }
+
+    @Override
+    public CheckedFuture<YangTextSchemaSource, SchemaSourceException> getSource(final SourceIdentifier sourceIdentifier) {
+        final String moduleName = sourceIdentifier.getName();
+
+        // If formatted revision is SourceIdentifier.NOT_PRESENT_FORMATTED_REVISION, we have to omit it from request
+        final String formattedRevision = sourceIdentifier.getRevision().equals(SourceIdentifier.NOT_PRESENT_FORMATTED_REVISION) ? null : sourceIdentifier.getRevision();
+        final Optional<String> revision = Optional.fromNullable(formattedRevision);
+        final ImmutableCompositeNode getSchemaRequest = createGetSchemaRequest(moduleName, revision);
+
+        logger.trace("{}: Loading YANG schema source for {}:{}", id, moduleName, revision);
+
+        final ListenableFuture<YangTextSchemaSource> transformed = Futures.transform(
+                rpc.invokeRpc(GET_SCHEMA_QNAME, getSchemaRequest),
+                new ResultToYangSourceTransformer(id, sourceIdentifier, moduleName, revision));
+
+        // FIXME remove this get, it is only present to wait until source is retrieved
+        // (goal is to limit concurrent schema download, since NetconfDevice listener does not handle concurrent messages properly)
+        try {
+            logger.trace("{}: Blocking for {}", id, sourceIdentifier);
+            transformed.get();
+        } catch (final InterruptedException e) {
+            throw new RuntimeException(e);
+        } catch (final ExecutionException e) {
+           throw new IllegalStateException(id + ": Failed while getting source: " + sourceIdentifier, e);
+        }
+
+        return Futures.makeChecked(transformed, MAPPER);
+    }
+
+    /**
+     * Transform composite node to string schema representation and then to ASTSchemaSource
+     */
+    private static final class ResultToYangSourceTransformer implements
+            Function<RpcResult<CompositeNode>, YangTextSchemaSource> {
+
+        private final RemoteDeviceId id;
+        private final SourceIdentifier sourceIdentifier;
+        private final String moduleName;
+        private final Optional<String> revision;
+
+        public ResultToYangSourceTransformer(final RemoteDeviceId id, final SourceIdentifier sourceIdentifier,
+                final String moduleName, final Optional<String> revision) {
+            this.id = id;
+            this.sourceIdentifier = sourceIdentifier;
+            this.moduleName = moduleName;
+            this.revision = revision;
+        }
+
+        @Override
+        public YangTextSchemaSource apply(final RpcResult<CompositeNode> input) {
+
+            if (input.isSuccessful()) {
+
+                final Optional<String> schemaString = getSchemaFromRpc(id, input.getResult());
+
+                Preconditions.checkState(schemaString.isPresent(),
+                        "%s: Unexpected response to get-schema, schema not present in message for: %s", id, sourceIdentifier);
+
+                logger.debug("{}: YANG Schema successfully retrieved for {}:{}", id, moduleName, revision);
+
+                return new NetconfYangTextSchemaSource(id, sourceIdentifier, schemaString);
+            }
+
+            logger.warn("{}: YANG schema was not successfully retrieved for {}. Errors: {}", id, sourceIdentifier,
+                    input.getErrors());
+
+            throw new IllegalStateException(String.format(
+                    "%s: YANG schema was not successfully retrieved for %s. Errors: %s", id, sourceIdentifier,
+                    input.getErrors()));
+
+        }
+
+    }
+
+    private static class NetconfYangTextSchemaSource extends YangTextSchemaSource {
+        private final RemoteDeviceId id;
+        private final Optional<String> schemaString;
+
+        public NetconfYangTextSchemaSource(final RemoteDeviceId id, final SourceIdentifier sId, final Optional<String> schemaString) {
+            super(sId);
+            this.id = id;
+            this.schemaString = schemaString;
+        }
+
+        @Override
+        protected Objects.ToStringHelper addToStringAttributes(final Objects.ToStringHelper toStringHelper) {
+            return toStringHelper.add("device", id);
+        }
+
+        @Override
+        public InputStream openStream() throws IOException {
+            return IOUtils.toInputStream(schemaString.get());
+        }
+    }
+}
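
One detail worth calling out in getSource() above: a SourceIdentifier with no revision reports a sentinel string rather than null, and that sentinel has to be mapped back to an absent Optional so the get-schema request omits the version leaf. A standalone sketch of just that mapping, with an assumed sentinel value standing in for SourceIdentifier.NOT_PRESENT_FORMATTED_REVISION:

    import com.google.common.base.Optional;

    // Illustrative only: translate the "no revision" sentinel into Optional.absent()
    // so createGetSchemaRequest() skips the <version> leaf.
    final class RevisionMapping {
        static final String NOT_PRESENT = "0000-00-00";   // assumed sentinel, for the sketch only

        static Optional<String> toRequestRevision(final String formattedRevision) {
            return NOT_PRESENT.equals(formattedRevision)
                    ? Optional.<String>absent()
                    : Optional.fromNullable(formattedRevision);
        }

        public static void main(final String[] args) {
            System.out.println(toRequestRevision(NOT_PRESENT));   // Optional.absent()
            System.out.println(toRequestRevision("2013-07-22"));  // Optional.of(2013-07-22)
        }
    }
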
index 1e3cf4b6fce9f30917ef4c3bc3c19a61bf9b7d64..893a45aaa2df41eadc71a1a62215493fda3d4a04 100644 (file)
@@ -49,6 +49,8 @@ import org.w3c.dom.Element;
 
 public class NetconfMessageTransformUtil {
 
+    public static final String MESSAGE_ID_ATTR = "message-id";
+
     private NetconfMessageTransformUtil() {}
 
     public static final QName IETF_NETCONF_MONITORING = QName.create(NetconfState.QNAME, "ietf-netconf-monitoring");
@@ -125,8 +127,8 @@ public class NetconfMessageTransformUtil {
 
     public static void checkValidReply(final NetconfMessage input, final NetconfMessage output)
             throws NetconfDocumentedException {
-        final String inputMsgId = input.getDocument().getDocumentElement().getAttribute("message-id");
-        final String outputMsgId = output.getDocument().getDocumentElement().getAttribute("message-id");
+        final String inputMsgId = input.getDocument().getDocumentElement().getAttribute(MESSAGE_ID_ATTR);
+        final String outputMsgId = output.getDocument().getDocumentElement().getAttribute(MESSAGE_ID_ATTR);
 
         if(inputMsgId.equals(outputMsgId) == false) {
             Map<String,String> errorInfo = ImmutableMap.<String,String>builder()
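
The hunk above only lifts the "message-id" literal into the MESSAGE_ID_ATTR constant; the matching rule itself is unchanged. A trivial standalone model of that rule, with plain strings standing in for the DOM attribute lookups:

    // Illustrative only: a reply is valid when its message-id equals the request's.
    final class MessageIdCheck {
        static final String MESSAGE_ID_ATTR = "message-id";

        static boolean isValidReply(final String requestMessageId, final String replyMessageId) {
            return requestMessageId.equals(replyMessageId);
        }

        public static void main(final String[] args) {
            System.out.println(isValidReply("101", "101"));  // true
            System.out.println(isValidReply("101", "102"));  // false: the real code raises NetconfDocumentedException
        }
    }
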
index fa488dadd3efb4afefc48327b683a44f296727dc..218ec0be8d319b91abdb8a1054c93f09675dbc68 100644 (file)
@@ -8,14 +8,18 @@
 package org.opendaylight.controller.sal.connect.netconf;
 
 import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyCollectionOf;
 import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.timeout;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
 import com.google.common.base.Optional;
+import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.Futures;
 import java.io.InputStream;
@@ -28,12 +32,13 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 import org.opendaylight.controller.netconf.api.NetconfMessage;
 import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
 import org.opendaylight.controller.sal.connect.api.MessageTransformer;
 import org.opendaylight.controller.sal.connect.api.RemoteDeviceCommunicator;
 import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
-import org.opendaylight.controller.sal.connect.api.SchemaContextProviderFactory;
 import org.opendaylight.controller.sal.connect.api.SchemaSourceProviderFactory;
 import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
 import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceRpc;
@@ -45,8 +50,15 @@ import org.opendaylight.yangtools.yang.common.RpcResult;
 import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
 import org.opendaylight.yangtools.yang.data.api.CompositeNode;
 import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.ModuleImport;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
+import org.opendaylight.yangtools.yang.model.repo.api.MissingSchemaSourceException;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaContextFactory;
+import org.opendaylight.yangtools.yang.model.repo.api.SchemaResolutionException;
+import org.opendaylight.yangtools.yang.model.repo.api.SourceIdentifier;
+import org.opendaylight.yangtools.yang.model.repo.spi.PotentialSchemaSource;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceRegistration;
+import org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceRegistry;
 import org.opendaylight.yangtools.yang.model.util.repo.SchemaSourceProvider;
 import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
 
@@ -70,7 +82,13 @@ public class NetconfDeviceTest {
     public static final String TEST_NAMESPACE = "test:namespace";
     public static final String TEST_MODULE = "test-module";
     public static final String TEST_REVISION = "2013-07-22";
-    private NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver = new NetconfStateSchemas.NetconfStateSchemasResolver() {
+    public static final SourceIdentifier TEST_SID = new SourceIdentifier(TEST_MODULE, Optional.of(TEST_REVISION));
+    public static final String TEST_CAPABILITY = TEST_NAMESPACE + "?module=" + TEST_MODULE + "&amp;revision=" + TEST_REVISION;
+
+    public static final SourceIdentifier TEST_SID2 = new SourceIdentifier(TEST_MODULE + "2", Optional.of(TEST_REVISION));
+    public static final String TEST_CAPABILITY2 = TEST_NAMESPACE + "?module=" + TEST_MODULE + "2" + "&amp;revision=" + TEST_REVISION;
+
+    private static final NetconfStateSchemas.NetconfStateSchemasResolver stateSchemasResolver = new NetconfStateSchemas.NetconfStateSchemasResolver() {
 
         @Override
         public NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionCapabilities remoteSessionCapabilities, final RemoteDeviceId id) {
@@ -79,14 +97,71 @@ public class NetconfDeviceTest {
     };
 
     @Test
-    public void testNetconfDeviceWithoutMonitoring() throws Exception {
+    public void testNetconfDeviceFailFirstSchemaFailSecondEmpty() throws Exception {
+        final ArrayList<String> capList = Lists.newArrayList(TEST_CAPABILITY);
+
         final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
         final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
 
-        final NetconfDevice device = new NetconfDevice(getId(), facade, getExecutor(), getMessageTransformer(), getSchemaContextProviderFactory(), getSourceProviderFactory(), stateSchemasResolver);
-        device.onRemoteSessionUp(getSessionCaps(false, Collections.<String>emptyList()), listener);
+        final SchemaContextFactory schemaFactory = getSchemaFactory();
+
+        // Make fallback attempt to fail due to empty resolved sources
+        final SchemaResolutionException schemaResolutionException
+                = new SchemaResolutionException("fail first",
+                Collections.<SourceIdentifier>emptyList(), HashMultimap.<SourceIdentifier, ModuleImport>create());
+        doReturn(Futures.immediateFailedCheckedFuture(
+                schemaResolutionException))
+                .when(schemaFactory).createSchemaContext(anyCollectionOf(SourceIdentifier.class));
+
+        final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
+                = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, stateSchemasResolver);
+        final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), getMessageTransformer());
+        // Monitoring not supported
+        final NetconfSessionCapabilities sessionCaps = getSessionCaps(false, capList);
+        device.onRemoteSessionUp(sessionCaps, listener);
 
         Mockito.verify(facade, Mockito.timeout(5000)).onDeviceDisconnected();
+        Mockito.verify(listener, Mockito.timeout(5000)).close();
+        Mockito.verify(schemaFactory, times(1)).createSchemaContext(anyCollectionOf(SourceIdentifier.class));
+    }
+
+    @Test
+    public void testNetconfDeviceMissingSource() throws Exception {
+        final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
+        final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
+
+        final SchemaContextFactory schemaFactory = getSchemaFactory();
+
+        // Make fallback attempt to fail due to empty resolved sources
+        final MissingSchemaSourceException schemaResolutionException = new MissingSchemaSourceException("fail first", TEST_SID);
+        doAnswer(new Answer() {
+            @Override
+            public Object answer(final InvocationOnMock invocation) throws Throwable {
+                if(((Collection<?>) invocation.getArguments()[0]).size() == 2) {
+                    return Futures.immediateFailedCheckedFuture(schemaResolutionException);
+                } else {
+                    return Futures.immediateCheckedFuture(getSchema());
+                }
+            }
+        }).when(schemaFactory).createSchemaContext(anyCollectionOf(SourceIdentifier.class));
+
+        final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
+                = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, stateSchemasResolver);
+        final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), getMessageTransformer());
+        // Monitoring supported
+        final NetconfSessionCapabilities sessionCaps = getSessionCaps(true, Lists.newArrayList(TEST_CAPABILITY, TEST_CAPABILITY2));
+        device.onRemoteSessionUp(sessionCaps, listener);
+
+        Mockito.verify(facade, Mockito.timeout(5000)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+        Mockito.verify(schemaFactory, times(2)).createSchemaContext(anyCollectionOf(SourceIdentifier.class));
+    }
+
+    private SchemaSourceRegistry getSchemaRegistry() {
+        final SchemaSourceRegistry mock = mock(SchemaSourceRegistry.class);
+        final SchemaSourceRegistration mockReg = mock(SchemaSourceRegistration.class);
+        doNothing().when(mockReg).close();
+        doReturn(mockReg).when(mock).registerSchemaSource(any(org.opendaylight.yangtools.yang.model.repo.spi.SchemaSourceProvider.class), any(PotentialSchemaSource.class));
+        return mock;
     }
 
     @Test
@@ -95,7 +170,10 @@ public class NetconfDeviceTest {
         final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
 
         final MessageTransformer<NetconfMessage> messageTransformer = getMessageTransformer();
-        final NetconfDevice device = new NetconfDevice(getId(), facade, getExecutor(), messageTransformer, getSchemaContextProviderFactory(), getSourceProviderFactory(), stateSchemasResolver);
+
+        final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
+                = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), getSchemaFactory(), stateSchemasResolver);
+        final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), messageTransformer);
 
         device.onNotification(netconfMessage);
         device.onNotification(netconfMessage);
@@ -103,7 +181,7 @@ public class NetconfDeviceTest {
         verify(facade, times(0)).onNotification(any(CompositeNode.class));
 
         final NetconfSessionCapabilities sessionCaps = getSessionCaps(true,
-                Lists.newArrayList(TEST_NAMESPACE + "?module=" + TEST_MODULE + "&amp;revision=" + TEST_REVISION));
+                Lists.newArrayList(TEST_CAPABILITY));
 
         device.onRemoteSessionUp(sessionCaps, listener);
 
@@ -120,40 +198,34 @@ public class NetconfDeviceTest {
         final RemoteDeviceHandler<NetconfSessionCapabilities> facade = getFacade();
         final RemoteDeviceCommunicator<NetconfMessage> listener = getListener();
 
-        final SchemaContextProviderFactory schemaContextProviderFactory = getSchemaContextProviderFactory();
-        final SchemaSourceProviderFactory<InputStream> sourceProviderFactory = getSourceProviderFactory();
+        final SchemaContextFactory schemaContextProviderFactory = getSchemaFactory();
         final MessageTransformer<NetconfMessage> messageTransformer = getMessageTransformer();
 
-        final NetconfDevice device = new NetconfDevice(getId(), facade, getExecutor(), messageTransformer, schemaContextProviderFactory, sourceProviderFactory, stateSchemasResolver);
+        final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
+                = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaContextProviderFactory, stateSchemasResolver);
+        final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), messageTransformer);
         final NetconfSessionCapabilities sessionCaps = getSessionCaps(true,
                 Lists.newArrayList(TEST_NAMESPACE + "?module=" + TEST_MODULE + "&amp;revision=" + TEST_REVISION));
         device.onRemoteSessionUp(sessionCaps, listener);
 
-        verify(sourceProviderFactory, timeout(5000)).createSourceProvider(any(RpcImplementation.class));
-        verify(schemaContextProviderFactory, timeout(5000)).createContextProvider(any(Collection.class), any(SchemaSourceProvider.class));
+        verify(schemaContextProviderFactory, timeout(5000)).createSchemaContext(any(Collection.class));
         verify(messageTransformer, timeout(5000)).onGlobalContextUpdated(any(SchemaContext.class));
-        verify(facade, timeout(5000)).onDeviceConnected(any(SchemaContextProvider.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+        verify(facade, timeout(5000)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
 
         device.onRemoteSessionDown();
         verify(facade, timeout(5000)).onDeviceDisconnected();
 
         device.onRemoteSessionUp(sessionCaps, listener);
 
-        verify(sourceProviderFactory, timeout(5000).times(2)).createSourceProvider(any(RpcImplementation.class));
-        verify(schemaContextProviderFactory, timeout(5000).times(2)).createContextProvider(any(Collection.class), any(SchemaSourceProvider.class));
+        verify(schemaContextProviderFactory, timeout(5000).times(2)).createSchemaContext(any(Collection.class));
         verify(messageTransformer, timeout(5000).times(2)).onGlobalContextUpdated(any(SchemaContext.class));
-        verify(facade, timeout(5000).times(2)).onDeviceConnected(any(SchemaContextProvider.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+        verify(facade, timeout(5000).times(2)).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
     }
 
-    private SchemaContextProviderFactory getSchemaContextProviderFactory() {
-        final SchemaContextProviderFactory schemaContextProviderFactory = mockClass(SchemaContextProviderFactory.class);
-        doReturn(new SchemaContextProvider() {
-            @Override
-            public SchemaContext getSchemaContext() {
-                return getSchema();
-            }
-        }).when(schemaContextProviderFactory).createContextProvider(any(Collection.class), any(SchemaSourceProvider.class));
-        return schemaContextProviderFactory;
+    private SchemaContextFactory getSchemaFactory() {
+        final SchemaContextFactory schemaFactory = mockClass(SchemaContextFactory.class);
+        doReturn(Futures.immediateCheckedFuture(getSchema())).when(schemaFactory).createSchemaContext(any(Collection.class));
+        return schemaFactory;
     }
 
     public static SchemaContext getSchema() {
@@ -167,7 +239,7 @@ public class NetconfDeviceTest {
 
     private RemoteDeviceHandler<NetconfSessionCapabilities> getFacade() throws Exception {
         final RemoteDeviceHandler<NetconfSessionCapabilities> remoteDeviceHandler = mockCloseableClass(RemoteDeviceHandler.class);
-        doNothing().when(remoteDeviceHandler).onDeviceConnected(any(SchemaContextProvider.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
+        doNothing().when(remoteDeviceHandler).onDeviceConnected(any(SchemaContext.class), any(NetconfSessionCapabilities.class), any(RpcImplementation.class));
         doNothing().when(remoteDeviceHandler).onDeviceDisconnected();
         doNothing().when(remoteDeviceHandler).onNotification(any(CompositeNode.class));
         return remoteDeviceHandler;
@@ -190,7 +262,7 @@ public class NetconfDeviceTest {
     }
 
     private static <T> T mockClass(final Class<T> remoteDeviceHandlerClass) {
-        final T mock = Mockito.mock(remoteDeviceHandlerClass);
+        final T mock = mock(remoteDeviceHandlerClass);
         Mockito.doReturn(remoteDeviceHandlerClass.getSimpleName()).when(mock).toString();
         return mock;
     }
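
The doAnswer stub in testNetconfDeviceMissingSource drives the schema fallback: the first createSchemaContext() call, made with two sources, fails, and the retried call with fewer sources succeeds. A dependency-free model of that stubbed behaviour, using plain strings instead of SourceIdentifier and SchemaContext:

    import java.util.Arrays;
    import java.util.Collection;
    import java.util.Collections;

    // Stand-in for the stubbed schema factory: fail while two sources are requested,
    // succeed once the missing one has been dropped by the device's retry logic.
    final class FallbackFactorySketch {
        static String createSchemaContext(final Collection<String> sources) throws Exception {
            if (sources.size() == 2) {
                throw new Exception("missing schema source");  // triggers the retry
            }
            return "SchemaContext" + sources;
        }

        public static void main(final String[] args) throws Exception {
            try {
                createSchemaContext(Arrays.asList("test-module", "test-module2"));
            } catch (final Exception e) {
                System.out.println("first attempt failed: " + e.getMessage());
            }
            System.out.println(createSchemaContext(Collections.singletonList("test-module")));
        }
    }
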
index a2bee8ffee7295c8c086365fafa1651235f4efdc..674c5bf5a596bb03a34f785d854ffdb305aac12b 100644 (file)
       <groupId>com.typesafe.akka</groupId>
       <artifactId>akka-osgi_${scala.version}</artifactId>
     </dependency>
+
+  <dependency>
+     <groupId>com.typesafe.akka</groupId>
+     <artifactId>akka-slf4j_${scala.version}</artifactId>
+  </dependency>
     <!-- SAL Dependencies -->
 
     <dependency>
       <scope>test</scope>
     </dependency>
 
-    <dependency>
+      <dependency>
       <groupId>org.slf4j</groupId>
       <artifactId>slf4j-simple</artifactId>
       <version>${slf4j.version}</version>
index 5c56455bd0c208a40709b4a221fb6a22ed0b65a1..514a2f141daea13e5e71ec1f0f0a8acf9eadcf9d 100644 (file)
@@ -17,7 +17,7 @@ import akka.japi.Creator;
 import akka.japi.Function;
 import org.opendaylight.controller.remote.rpc.messages.UpdateSchemaContext;
 import org.opendaylight.controller.remote.rpc.registry.ClusterWrapper;
-import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistryOld;
 import org.opendaylight.controller.sal.core.api.Broker;
 import org.opendaylight.controller.sal.core.api.RpcProvisionRegistry;
 import org.opendaylight.yangtools.yang.common.QName;
@@ -72,7 +72,7 @@ public class RpcManager extends AbstractUntypedActor {
   private void createRpcActors() {
     LOG.debug("Create rpc registry and broker actors");
 
-    rpcRegistry = getContext().actorOf(RpcRegistry.props(clusterWrapper), ActorConstants.RPC_REGISTRY);
+    rpcRegistry = getContext().actorOf(RpcRegistryOld.props(clusterWrapper), ActorConstants.RPC_REGISTRY);
     rpcBroker = getContext().actorOf(RpcBroker.props(brokerSession, rpcRegistry, schemaContext), ActorConstants.RPC_BROKER);
   }
 
index 5e19653a22d21ed83ae3eec370290dc37f50c1ce..d21d05d7fe9b02ae9a604a2f6c0567706ec2f68a 100644 (file)
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-
 package org.opendaylight.controller.remote.rpc.registry;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableSet;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.LinkedHashSet;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-
-public class RoutingTable<I, R> {
-
-  private final Logger LOG = LoggerFactory.getLogger(RoutingTable.class);
+import akka.actor.ActorRef;
+import akka.japi.Option;
+import akka.japi.Pair;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Copier;
+import org.opendaylight.controller.sal.connector.api.RpcRouter;
 
-  private ConcurrentMap<I,R> globalRpcMap = new ConcurrentHashMap<>();
-  private ConcurrentMap<I, LinkedHashSet<R>> routedRpcMap = new ConcurrentHashMap<>();
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Map;
 
-  public ConcurrentMap<I, R> getGlobalRpcMap() {
-    return globalRpcMap;
-  }
+public class RoutingTable implements Copier<RoutingTable>, Serializable {
 
-  public ConcurrentMap<I, LinkedHashSet<R>> getRoutedRpcMap() {
-    return routedRpcMap;
-  }
+    private Map<RpcRouter.RouteIdentifier<?, ?, ?>, Long> table = new HashMap<>();
+    private ActorRef router;
 
-  public R getGlobalRoute(final I routeId) {
-    Preconditions.checkNotNull(routeId, "getGlobalRoute: routeId cannot be null!");
-    return globalRpcMap.get(routeId);
-  }
+    @Override
+    public RoutingTable copy() {
+        RoutingTable copy = new RoutingTable();
+        copy.setTable(new HashMap<>(table));
+        copy.setRouter(this.getRouter());
 
-  public void addGlobalRoute(final I routeId, final R route) {
-    Preconditions.checkNotNull(routeId, "addGlobalRoute: routeId cannot be null!");
-    Preconditions.checkNotNull(route, "addGlobalRoute: route cannot be null!");
-    LOG.debug("addGlobalRoute: adding  a new route with id[{}] and value [{}]", routeId, route);
-    if(globalRpcMap.putIfAbsent(routeId, route) != null) {
-      LOG.debug("A route already exist for route id [{}] ", routeId);
+        return copy;
     }
-  }
 
-  public void removeGlobalRoute(final I routeId) {
-    Preconditions.checkNotNull(routeId, "removeGlobalRoute: routeId cannot be null!");
-    LOG.debug("removeGlobalRoute: removing  a new route with id [{}]", routeId);
-    globalRpcMap.remove(routeId);
-  }
+    public Option<Pair<ActorRef, Long>> getRouterFor(RpcRouter.RouteIdentifier<?, ?, ?> routeId){
+        Long updatedTime = table.get(routeId);
 
-  public Set<R> getRoutedRpc(final I routeId) {
-    Preconditions.checkNotNull(routeId, "getRoutes: routeId cannot be null!");
-    Set<R> routes = routedRpcMap.get(routeId);
-
-    if (routes == null) {
-      return Collections.emptySet();
+        if (updatedTime == null || router == null)
+            return Option.none();
+        else
+            return Option.option(new Pair<>(router, updatedTime));
     }
 
-    return ImmutableSet.copyOf(routes);
-  }
-
-  public R getLastAddedRoutedRpc(final I routeId) {
-
-    Set<R> routes = getRoutedRpc(routeId);
-
-    if (routes.isEmpty()) {
-      return null;
+    public void addRoute(RpcRouter.RouteIdentifier<?,?,?> routeId){
+        table.put(routeId, System.currentTimeMillis());
     }
 
-    R route = null;
-    Iterator<R> iter = routes.iterator();
-    while (iter.hasNext()) {
-      route = iter.next();
+    public void removeRoute(RpcRouter.RouteIdentifier<?, ?, ?> routeId){
+        table.remove(routeId);
     }
 
-    return route;
-  }
-
-  public void addRoutedRpc(final I routeId, final R route)   {
-    Preconditions.checkNotNull(routeId, "addRoute: routeId cannot be null");
-    Preconditions.checkNotNull(route, "addRoute: route cannot be null");
-    LOG.debug("addRoute: adding a route with k/v [{}/{}]", routeId, route);
-    threadSafeAdd(routeId, route);
-  }
-
-  public void addRoutedRpcs(final Set<I> routeIds, final R route) {
-    Preconditions.checkNotNull(routeIds, "addRoutes: routeIds must not be null");
-    for (I routeId : routeIds){
-      addRoutedRpc(routeId, route);
+    public Boolean contains(RpcRouter.RouteIdentifier<?, ?, ?> routeId){
+        return table.containsKey(routeId);
     }
-  }
 
-  public void removeRoute(final I routeId, final R route) {
-    Preconditions.checkNotNull(routeId, "removeRoute: routeId cannot be null!");
-    Preconditions.checkNotNull(route, "removeRoute: route cannot be null!");
-
-    LinkedHashSet<R> routes = routedRpcMap.get(routeId);
-    if (routes == null) {
-      return;
+    public Boolean isEmpty(){
+        return table.isEmpty();
     }
-    LOG.debug("removeRoute: removing  a new route with k/v [{}/{}]", routeId, route);
-    threadSafeRemove(routeId, route);
-  }
-
-  public void removeRoutes(final Set<I> routeIds, final R route) {
-    Preconditions.checkNotNull(routeIds, "removeRoutes: routeIds must not be null");
-    for (I routeId : routeIds){
-      removeRoute(routeId, route);
+    ///
+    /// Getter, Setters
+    ///
+    //TODO: Remove public
+    public Map<RpcRouter.RouteIdentifier<?, ?, ?>, Long> getTable() {
+        return table;
     }
-  }
-
-  /**
-   * This method guarantees that no 2 thread over write each other's changes.
-   * Just so that we dont end up in infinite loop, it tries for 100 times then throw
-   */
-  private void threadSafeAdd(final I routeId, final R route) {
 
-    for (int i=0;i<100;i++){
-
-      LinkedHashSet<R> updatedRoutes = new LinkedHashSet<>();
-      updatedRoutes.add(route);
-      LinkedHashSet<R> oldRoutes = routedRpcMap.putIfAbsent(routeId, updatedRoutes);
-      if (oldRoutes == null) {
-        return;
-      }
+    void setTable(Map<RpcRouter.RouteIdentifier<?, ?, ?>, Long> table) {
+        this.table = table;
+    }
 
-      updatedRoutes = new LinkedHashSet<>(oldRoutes);
-      updatedRoutes.add(route);
+    public ActorRef getRouter() {
+        return router;
+    }
 
-      if (routedRpcMap.replace(routeId, oldRoutes, updatedRoutes)) {
-        return;
-      }
+    public void setRouter(ActorRef router) {
+        this.router = router;
     }
-    //the method did not already return means it failed to add route in 100 attempts
-    throw new IllegalStateException("Failed to add route [" + routeId + "]");
-  }
-
-  /**
-   * This method guarantees that no 2 thread over write each other's changes.
-   * Just so that we dont end up in infinite loop, it tries for 100 times then throw
-   */
-  private void threadSafeRemove(final I routeId, final R route) {
-    LinkedHashSet<R> updatedRoutes = null;
-    for (int i=0;i<100;i++){
-      LinkedHashSet<R> oldRoutes = routedRpcMap.get(routeId);
-
-      // if route to be deleted is the only entry in the set then remove routeId from the cache
-      if ((oldRoutes.size() == 1) && oldRoutes.contains(route)){
-        routedRpcMap.remove(routeId);
-        return;
-      }
-
-      // if there are multiple routes for this routeId, remove the route to be deleted only from the set.
-      updatedRoutes = new LinkedHashSet<>(oldRoutes);
-      updatedRoutes.remove(route);
-      if (routedRpcMap.replace(routeId, oldRoutes, updatedRoutes)) {
-        return;
-      }
 
+    @Override
+    public String toString() {
+        return "RoutingTable{" +
+                "table=" + table +
+                ", router=" + router +
+                '}';
     }
-    //the method did not already return means it failed to remove route in 100 attempts
-    throw new IllegalStateException("Failed to remove route [" + routeId + "]");
-  }
 }
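
The rewritten RoutingTable above keys every advertised route by a timestamp and exposes copy() for the gossip/bucket layer. A dependency-free model of that copy-on-write behaviour (no Akka types, the Copier interface replaced by a plain method) showing that mutating the copy leaves the original untouched:

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of the copy-on-write table: copy() clones the map, so addRoute() on the
    // copy does not leak into the original instance handed out earlier.
    final class RoutingTableSketch {
        private Map<String, Long> table = new HashMap<>();

        RoutingTableSketch copy() {
            final RoutingTableSketch copy = new RoutingTableSketch();
            copy.table = new HashMap<>(table);
            return copy;
        }

        void addRoute(final String routeId) {
            table.put(routeId, System.currentTimeMillis());
        }

        boolean contains(final String routeId) {
            return table.containsKey(routeId);
        }

        public static void main(final String[] args) {
            final RoutingTableSketch original = new RoutingTableSketch();
            final RoutingTableSketch copy = original.copy();
            copy.addRoute("rpc:get-config");
            System.out.println(original.contains("rpc:get-config")); // false
            System.out.println(copy.contains("rpc:get-config"));     // true
        }
    }
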
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RoutingTableOld.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RoutingTableOld.java
new file mode 100644 (file)
index 0000000..5951776
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.remote.rpc.registry;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+public class RoutingTableOld<I, R> {
+
+  private final Logger LOG = LoggerFactory.getLogger(RoutingTableOld.class);
+
+  private ConcurrentMap<I,R> globalRpcMap = new ConcurrentHashMap<>();
+  private ConcurrentMap<I, LinkedHashSet<R>> routedRpcMap = new ConcurrentHashMap<>();
+
+  public ConcurrentMap<I, R> getGlobalRpcMap() {
+    return globalRpcMap;
+  }
+
+  public ConcurrentMap<I, LinkedHashSet<R>> getRoutedRpcMap() {
+    return routedRpcMap;
+  }
+
+  public R getGlobalRoute(final I routeId) {
+    Preconditions.checkNotNull(routeId, "getGlobalRoute: routeId cannot be null!");
+    return globalRpcMap.get(routeId);
+  }
+
+  public void addGlobalRoute(final I routeId, final R route) {
+    Preconditions.checkNotNull(routeId, "addGlobalRoute: routeId cannot be null!");
+    Preconditions.checkNotNull(route, "addGlobalRoute: route cannot be null!");
+    LOG.debug("addGlobalRoute: adding  a new route with id[{}] and value [{}]", routeId, route);
+    if(globalRpcMap.putIfAbsent(routeId, route) != null) {
+      LOG.debug("A route already exist for route id [{}] ", routeId);
+    }
+  }
+
+  public void removeGlobalRoute(final I routeId) {
+    Preconditions.checkNotNull(routeId, "removeGlobalRoute: routeId cannot be null!");
+    LOG.debug("removeGlobalRoute: removing  a new route with id [{}]", routeId);
+    globalRpcMap.remove(routeId);
+  }
+
+  public Set<R> getRoutedRpc(final I routeId) {
+    Preconditions.checkNotNull(routeId, "getRoutes: routeId cannot be null!");
+    Set<R> routes = routedRpcMap.get(routeId);
+
+    if (routes == null) {
+      return Collections.emptySet();
+    }
+
+    return ImmutableSet.copyOf(routes);
+  }
+
+  public R getLastAddedRoutedRpc(final I routeId) {
+
+    Set<R> routes = getRoutedRpc(routeId);
+
+    if (routes.isEmpty()) {
+      return null;
+    }
+
+    R route = null;
+    Iterator<R> iter = routes.iterator();
+    while (iter.hasNext()) {
+      route = iter.next();
+    }
+
+    return route;
+  }
+
+  public void addRoutedRpc(final I routeId, final R route)   {
+    Preconditions.checkNotNull(routeId, "addRoute: routeId cannot be null");
+    Preconditions.checkNotNull(route, "addRoute: route cannot be null");
+    LOG.debug("addRoute: adding a route with k/v [{}/{}]", routeId, route);
+    threadSafeAdd(routeId, route);
+  }
+
+  public void addRoutedRpcs(final Set<I> routeIds, final R route) {
+    Preconditions.checkNotNull(routeIds, "addRoutes: routeIds must not be null");
+    for (I routeId : routeIds){
+      addRoutedRpc(routeId, route);
+    }
+  }
+
+  public void removeRoute(final I routeId, final R route) {
+    Preconditions.checkNotNull(routeId, "removeRoute: routeId cannot be null!");
+    Preconditions.checkNotNull(route, "removeRoute: route cannot be null!");
+
+    LinkedHashSet<R> routes = routedRpcMap.get(routeId);
+    if (routes == null) {
+      return;
+    }
+    LOG.debug("removeRoute: removing  a new route with k/v [{}/{}]", routeId, route);
+    threadSafeRemove(routeId, route);
+  }
+
+  public void removeRoutes(final Set<I> routeIds, final R route) {
+    Preconditions.checkNotNull(routeIds, "removeRoutes: routeIds must not be null");
+    for (I routeId : routeIds){
+      removeRoute(routeId, route);
+    }
+  }
+
+  /**
+   * This method guarantees that no two threads overwrite each other's changes.
+   * To avoid looping forever, it retries up to 100 times and then throws.
+   */
+  private void threadSafeAdd(final I routeId, final R route) {
+
+    for (int i=0;i<100;i++){
+
+      LinkedHashSet<R> updatedRoutes = new LinkedHashSet<>();
+      updatedRoutes.add(route);
+      LinkedHashSet<R> oldRoutes = routedRpcMap.putIfAbsent(routeId, updatedRoutes);
+      if (oldRoutes == null) {
+        return;
+      }
+
+      updatedRoutes = new LinkedHashSet<>(oldRoutes);
+      updatedRoutes.add(route);
+
+      if (routedRpcMap.replace(routeId, oldRoutes, updatedRoutes)) {
+        return;
+      }
+    }
+    //the method did not already return means it failed to add route in 100 attempts
+    throw new IllegalStateException("Failed to add route [" + routeId + "]");
+  }
+
+  /**
+   * This method guarantees that no two threads overwrite each other's changes.
+   * To avoid looping forever, it retries up to 100 times and then throws.
+   */
+  private void threadSafeRemove(final I routeId, final R route) {
+    LinkedHashSet<R> updatedRoutes = null;
+    for (int i=0;i<100;i++){
+      LinkedHashSet<R> oldRoutes = routedRpcMap.get(routeId);
+
+      // if route to be deleted is the only entry in the set then remove routeId from the cache
+      if ((oldRoutes.size() == 1) && oldRoutes.contains(route)){
+        routedRpcMap.remove(routeId);
+        return;
+      }
+
+      // if there are multiple routes for this routeId, remove the route to be deleted only from the set.
+      updatedRoutes = new LinkedHashSet<>(oldRoutes);
+      updatedRoutes.remove(route);
+      if (routedRpcMap.replace(routeId, oldRoutes, updatedRoutes)) {
+        return;
+      }
+
+    }
+    //reaching this point means the route could not be removed within 100 attempts
+    throw new IllegalStateException("Failed to remove route [" + routeId + "]");
+  }
+}
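
The add/remove helpers above combine two guarantees: LinkedHashSet preserves insertion order (which is why getLastAddedRoutedRpc simply iterates to the end), and ConcurrentMap.replace(key, oldValue, newValue) only succeeds when the entry was not modified in between, which is what makes the copy-and-replace retry loop safe. A standalone, illustrative sketch of the insertion-order property (not part of this patch; names are made up):

import java.util.Iterator;
import java.util.LinkedHashSet;

class LastAddedSketch {
    public static void main(String[] args) {
        LinkedHashSet<String> routes = new LinkedHashSet<>();
        routes.add("akka://node-1/user/rpc-broker");   // registered first
        routes.add("akka://node-2/user/rpc-broker");   // registered last

        String last = null;
        for (Iterator<String> it = routes.iterator(); it.hasNext(); ) {
            last = it.next();   // insertion order is preserved, so 'last' ends up as the newest entry
        }
        System.out.println(last);   // prints akka://node-2/user/rpc-broker
    }
}
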
index e36060cc13ece309f04f10adebdb74a62f158146..e2ebcb2b25a62c3f60232db52e90749736561948 100644 (file)
  */
 package org.opendaylight.controller.remote.rpc.registry;
 
-import akka.actor.ActorSelection;
+import akka.actor.ActorRef;
 import akka.actor.Address;
 import akka.actor.Props;
-import akka.cluster.ClusterEvent;
-import akka.cluster.Member;
-import akka.japi.Creator;
-import org.opendaylight.controller.remote.rpc.AbstractUntypedActor;
-import org.opendaylight.controller.remote.rpc.ActorConstants;
-import org.opendaylight.controller.remote.rpc.messages.AddRoutedRpc;
-import org.opendaylight.controller.remote.rpc.messages.AddRpc;
-import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpc;
-import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpcReply;
-import org.opendaylight.controller.remote.rpc.messages.GetRpc;
-import org.opendaylight.controller.remote.rpc.messages.GetRpcReply;
-import org.opendaylight.controller.remote.rpc.messages.RemoveRoutedRpc;
-import org.opendaylight.controller.remote.rpc.messages.RemoveRpc;
-import org.opendaylight.controller.remote.rpc.messages.RoutingTableData;
+import akka.actor.UntypedActor;
+import akka.dispatch.Mapper;
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
+import akka.japi.Option;
+import akka.japi.Pair;
+import akka.pattern.Patterns;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.remote.rpc.registry.gossip.Bucket;
+import org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore;
 import org.opendaylight.controller.sal.connector.api.RpcRouter;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import scala.collection.JavaConversions;
+import scala.concurrent.Future;
 
-import java.util.LinkedHashSet;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
+
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.AddOrUpdateRoutes;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.RemoveRoutes;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.SetLocalRouter;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRouters;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBuckets;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBucketsReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucket;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucketReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateBucket;
 
 /**
- * This Actor maintains the routing table state and sync it with other nodes in the cluster.
- *
- * A scheduler runs after an interval of time, which pick a random member from the cluster
- * and send the current state of routing table to the member.
- *
- * when a message of routing table data is received, it gets merged with the local routing table
- * to keep the latest data.
+ * Registry to look up cluster nodes that have registered for a given rpc.
+ * <p/>
+ * It uses {@link org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore} to maintain this
+ * cluster-wide information.
  */
+public class RpcRegistry extends UntypedActor {
+
+    final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
+
+    /**
+     * Store that keeps the registry. The bucket store syncs it across nodes in the cluster
+     */
+    private ActorRef bucketStore;
 
-public class RpcRegistry extends AbstractUntypedActor {
-
-  private static final Logger LOG = LoggerFactory.getLogger(RpcRegistry.class);
-  private RoutingTable<RpcRouter.RouteIdentifier<?, ?, ?>, String> routingTable;
-  private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
-  private final ClusterWrapper clusterWrapper;
-  private final ScheduledFuture<?> syncScheduler;
-
-  private RpcRegistry(ClusterWrapper clusterWrapper){
-    this.routingTable = new RoutingTable<>();
-    this.clusterWrapper = clusterWrapper;
-    this.syncScheduler = scheduler.scheduleAtFixedRate(new SendRoutingTable(), 10, 10, TimeUnit.SECONDS);
-  }
-
-  public static Props props(final ClusterWrapper clusterWrapper){
-    return Props.create(new Creator<RpcRegistry>(){
-
-      @Override
-      public RpcRegistry create() throws Exception {
-        return new RpcRegistry(clusterWrapper);
-      }
-    });
-  }
-
-  @Override
-  protected void handleReceive(Object message) throws Exception {
-    LOG.debug("Received message {}", message);
-    if(message instanceof RoutingTableData) {
-      syncRoutingTable((RoutingTableData) message);
-    } else if(message instanceof GetRoutedRpc) {
-      getRoutedRpc((GetRoutedRpc) message);
-    } else if(message instanceof GetRpc) {
-      getRpc((GetRpc) message);
-    } else if(message instanceof AddRpc) {
-      addRpc((AddRpc) message);
-    } else if(message instanceof RemoveRpc) {
-      removeRpc((RemoveRpc) message);
-    } else if(message instanceof AddRoutedRpc) {
-      addRoutedRpc((AddRoutedRpc) message);
-    } else if(message instanceof RemoveRoutedRpc) {
-      removeRoutedRpc((RemoveRoutedRpc) message);
+    /**
+     * Rpc broker that would use the registry to route requests.
+     */
+    private ActorRef localRouter;
+
+    public RpcRegistry() {
+        bucketStore = getContext().actorOf(Props.create(BucketStore.class), "store");
+    }
+
+    public RpcRegistry(ActorRef bucketStore) {
+        this.bucketStore = bucketStore;
     }
-  }
 
-  private void getRoutedRpc(GetRoutedRpc rpcMsg){
-    LOG.debug("Get latest routed Rpc location from routing table {}", rpcMsg);
-    String remoteActorPath = routingTable.getLastAddedRoutedRpc(rpcMsg.getRouteId());
-    GetRoutedRpcReply routedRpcReply = new GetRoutedRpcReply(remoteActorPath);
+    @Override
+    public void onReceive(Object message) throws Exception {
+
+        log.debug("Received message: message [{}]", message);
 
-    getSender().tell(routedRpcReply, self());
-  }
+        //TODO: if sender is remote, reject message
 
-  private void getRpc(GetRpc rpcMsg) {
-    LOG.debug("Get global Rpc location from routing table {}", rpcMsg);
-    String remoteActorPath = routingTable.getGlobalRoute(rpcMsg.getRouteId());
-    GetRpcReply rpcReply = new GetRpcReply(remoteActorPath);
+        if (message instanceof SetLocalRouter)
+            receiveSetLocalRouter((SetLocalRouter) message);
 
-    getSender().tell(rpcReply, self());
-  }
+        if (message instanceof AddOrUpdateRoutes)
+            receiveAddRoutes((AddOrUpdateRoutes) message);
 
-  private void addRpc(AddRpc rpcMsg) {
-    LOG.debug("Add Rpc to routing table {}", rpcMsg);
-    routingTable.addGlobalRoute(rpcMsg.getRouteId(), rpcMsg.getActorPath());
+        else if (message instanceof RemoveRoutes)
+            receiveRemoveRoutes((RemoveRoutes) message);
 
-    getSender().tell("Success", self());
-  }
+        else if (message instanceof Messages.FindRouters)
+            receiveGetRouter((FindRouters) message);
 
-  private void removeRpc(RemoveRpc rpcMsg) {
-    LOG.debug("Removing Rpc to routing table {}", rpcMsg);
-    routingTable.removeGlobalRoute(rpcMsg.getRouteId());
+        else
+            unhandled(message);
+    }
+
+    /**
+     * Registers the local rpc broker
+     *
+     * @param message contains {@link akka.actor.ActorRef} for rpc broker
+     */
+    private void receiveSetLocalRouter(SetLocalRouter message) {
+        localRouter = message.getRouter();
+    }
 
-    getSender().tell("Success", self());
-  }
+    /**
+     * @param msg contains the list of route ids to add
+     */
+    private void receiveAddRoutes(AddOrUpdateRoutes msg) {
 
-  private void addRoutedRpc(AddRoutedRpc rpcMsg) {
-    routingTable.addRoutedRpcs(rpcMsg.getAnnouncements(), rpcMsg.getActorPath());
-    getSender().tell("Success", self());
-  }
+        Preconditions.checkState(localRouter != null, "Router must be set first");
+
+        Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), 1000);
+        futureReply.map(getMapperToAddRoutes(msg.getRouteIdentifiers()), getContext().dispatcher());
+    }
 
-  private void removeRoutedRpc(RemoveRoutedRpc rpcMsg) {
-    routingTable.removeRoutes(rpcMsg.getAnnouncements(), rpcMsg.getActorPath());
-    getSender().tell("Success", self());
-  }
+    /**
+     * @param msg contains list of route ids to remove
+     */
+    private void receiveRemoveRoutes(RemoveRoutes msg) {
 
-  private void syncRoutingTable(RoutingTableData routingTableData) {
-    LOG.debug("Syncing routing table {}", routingTableData);
+        Future<Object> futureReply = Patterns.ask(bucketStore, new GetLocalBucket(), 1000);
+        futureReply.map(getMapperToRemoveRoutes(msg.getRouteIdentifiers()), getContext().dispatcher());
 
-    Map<RpcRouter.RouteIdentifier<?, ?, ?>, String> newRpcMap = routingTableData.getRpcMap();
-    Set<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds = newRpcMap.keySet();
-    for(RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
-      routingTable.addGlobalRoute(routeId, newRpcMap.get(routeId));
     }
 
-    Map<RpcRouter.RouteIdentifier<?, ?, ?>, LinkedHashSet<String>> newRoutedRpcMap =
-        routingTableData.getRoutedRpcMap();
-    routeIds = newRoutedRpcMap.keySet();
+    /**
+     * Finds routers for the given rpc.
+     *
+     * @param msg contains the rpc for which routers should be found
+     */
+    private void receiveGetRouter(FindRouters msg) {
+        final ActorRef sender = getSender();
 
-    for(RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
-      Set<String> routeAddresses = newRoutedRpcMap.get(routeId);
-      for(String routeAddress : routeAddresses) {
-        routingTable.addRoutedRpc(routeId, routeAddress);
-      }
+        Future<Object> futureReply = Patterns.ask(bucketStore, new GetAllBuckets(), 1000);
+        futureReply.map(getMapperToGetRouter(msg.getRouteIdentifier(), sender), getContext().dispatcher());
     }
-  }
-
-  private ActorSelection getRandomRegistryActor() {
-    ClusterEvent.CurrentClusterState clusterState = clusterWrapper.getState();
-    ActorSelection actor = null;
-    Set<Member> members = JavaConversions.asJavaSet(clusterState.members());
-    int memberSize = members.size();
-    // Don't select yourself
-    if(memberSize > 1) {
-      Address currentNodeAddress = clusterWrapper.getAddress();
-      int index = new Random().nextInt(memberSize);
-      int i = 0;
-      // keeping previous member, in case when random index member is same as current actor
-      // and current actor member is last in set
-      Member previousMember = null;
-      for(Member member : members){
-        if(i == index-1) {
-          previousMember = member;
-        }
-        if(i == index) {
-          if(!currentNodeAddress.equals(member.address())) {
-            actor = this.context().actorSelection(member.address() + ActorConstants.RPC_REGISTRY_PATH);
-            break;
-          } else if(index < memberSize-1){ // pick the next element in the set
-            index++;
-          }
+
+    /**
+     * Helper to create empty reply when no routers are found
+     *
+     * @return an empty {@link Messages.FindRoutersReply}
+     */
+    private Messages.FindRoutersReply createEmptyReply() {
+        List<Pair<ActorRef, Long>> routerWithUpdateTime = Collections.emptyList();
+        return new Messages.FindRoutersReply(routerWithUpdateTime);
+    }
+
+    /**
+     * Helper to create a reply when routers are found for the given rpc
+     *
+     * @param buckets all buckets known to this node
+     * @param routeId the rpc to look up
+     * @return a reply containing the routers found for the given rpc
+     */
+    private Messages.FindRoutersReply createReplyWithRouters(Map<Address, Bucket> buckets, RpcRouter.RouteIdentifier<?, ?, ?> routeId) {
+
+        List<Pair<ActorRef, Long>> routers = new ArrayList<>();
+        Option<Pair<ActorRef, Long>> routerWithUpdateTime = null;
+
+        for (Bucket bucket : buckets.values()) {
+
+            RoutingTable table = (RoutingTable) bucket.getData();
+            if (table == null)
+                continue;
+
+            routerWithUpdateTime = table.getRouterFor(routeId);
+            if (routerWithUpdateTime.isEmpty())
+                continue;
+
+            routers.add(routerWithUpdateTime.get());
         }
-        i++;
-      }
-      if(actor == null && previousMember != null) {
-        actor = this.context().actorSelection(previousMember.address() + ActorConstants.RPC_REGISTRY_PATH);
-      }
+
+        return new Messages.FindRoutersReply(routers);
     }
-    return actor;
-  }
 
-  private class SendRoutingTable implements Runnable {
 
-    @Override
-    public void run() {
-      RoutingTableData routingTableData =
-          new RoutingTableData(routingTable.getGlobalRpcMap(), routingTable.getRoutedRpcMap());
-      LOG.debug("Sending routing table for sync {}", routingTableData);
-      ActorSelection actor = getRandomRegistryActor();
-      if(actor != null) {
-        actor.tell(routingTableData, self());
-      }
+    ///
+    ///private factories to create Mapper
+    ///
+
+    /**
+     * Receives all buckets returned from the bucket store and collects routers from the buckets where the given rpc (routeId) is found
+     *
+     * @param routeId the rpc
+     * @param sender  client who asked to find the routers.
+     * @return a {@link akka.dispatch.Mapper} that gets evaluated in future
+     */
+    private Mapper<Object, Void> getMapperToGetRouter(final RpcRouter.RouteIdentifier<?, ?, ?> routeId, final ActorRef sender) {
+        return new Mapper<Object, Void>() {
+            @Override
+            public Void apply(Object replyMessage) {
+
+                if (replyMessage instanceof GetAllBucketsReply) {
+
+                    GetAllBucketsReply reply = (GetAllBucketsReply) replyMessage;
+                    Map<Address, Bucket> buckets = reply.getBuckets();
+
+                    if (buckets == null || buckets.isEmpty()) {
+                        sender.tell(createEmptyReply(), getSelf());
+                        return null;
+                    }
+
+                    sender.tell(createReplyWithRouters(buckets, routeId), getSelf());
+                }
+                return null;
+            }
+        };
+    }
+
+    /**
+     * Receives local bucket from bucket store and updates routing table in it by removing the route. Subsequently,
+     * it updates the local bucket in bucket store.
+     *
+     * @param routeIds the route ids to remove
+     * @return a {@link akka.dispatch.Mapper} that gets evaluated in future
+     */
+    private Mapper<Object, Void> getMapperToRemoveRoutes(final List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds) {
+        return new Mapper<Object, Void>() {
+            @Override
+            public Void apply(Object replyMessage) {
+                if (replyMessage instanceof GetLocalBucketReply) {
+
+                    GetLocalBucketReply reply = (GetLocalBucketReply) replyMessage;
+                    Bucket<RoutingTable> bucket = reply.getBucket();
+
+                    if (bucket == null) {
+                        log.debug("Local bucket is null");
+                        return null;
+                    }
+
+                    RoutingTable table = bucket.getData();
+                    if (table == null)
+                        table = new RoutingTable();
+
+                    table.setRouter(localRouter);
+
+                    if (!table.isEmpty()) {
+                        for (RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
+                            table.removeRoute(routeId);
+                        }
+                    }
+                    bucket.setData(table);
+
+                    UpdateBucket updateBucketMessage = new UpdateBucket(bucket);
+                    bucketStore.tell(updateBucketMessage, getSelf());
+                }
+                return null;
+            }
+        };
+    }
+
+    /**
+     * Receives local bucket from bucket store and updates routing table in it by adding the route. Subsequently,
+     * it updates the local bucket in bucket store.
+     *
+     * @param routeIds the route ids to add
+     * @return a {@link akka.dispatch.Mapper} that gets evaluated in future
+     */
+    private Mapper<Object, Void> getMapperToAddRoutes(final List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds) {
+
+        return new Mapper<Object, Void>() {
+            @Override
+            public Void apply(Object replyMessage) {
+                if (replyMessage instanceof GetLocalBucketReply) {
+
+                    GetLocalBucketReply reply = (GetLocalBucketReply) replyMessage;
+                    Bucket<RoutingTable> bucket = reply.getBucket();
+
+                    if (bucket == null) {
+                        log.debug("Local bucket is null");
+                        return null;
+                    }
+
+                    RoutingTable table = bucket.getData();
+                    if (table == null)
+                        table = new RoutingTable();
+
+                    table.setRouter(localRouter);
+                    for (RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
+                        table.addRoute(routeId);
+                    }
+
+                    bucket.setData(table);
+
+                    UpdateBucket updateBucketMessage = new UpdateBucket(bucket);
+                    bucketStore.tell(updateBucketMessage, getSelf());
+                }
+
+                return null;
+            }
+        };
+    }
+
+    /**
+     * All messages used by the RpcRegistry
+     */
+    public static class Messages {
+
+
+        public static class ContainsRoute {
+            final List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIdentifiers;
+
+            public ContainsRoute(List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIdentifiers) {
+                Preconditions.checkArgument(routeIdentifiers != null &&
+                                            !routeIdentifiers.isEmpty(),
+                                            "Route Identifiers must be supplied");
+                this.routeIdentifiers = routeIdentifiers;
+            }
+
+            public List<RpcRouter.RouteIdentifier<?, ?, ?>> getRouteIdentifiers() {
+                return this.routeIdentifiers;
+            }
+
+            @Override
+            public String toString() {
+                return "ContainsRoute{" +
+                        "routeIdentifiers=" + routeIdentifiers +
+                        '}';
+            }
+        }
+
+        public static class AddOrUpdateRoutes extends ContainsRoute {
+
+            public AddOrUpdateRoutes(List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIdentifiers) {
+                super(routeIdentifiers);
+            }
+        }
+
+        public static class RemoveRoutes extends ContainsRoute {
+
+            public RemoveRoutes(List<RpcRouter.RouteIdentifier<?, ?, ?>> routeIdentifiers) {
+                super(routeIdentifiers);
+            }
+        }
+
+        public static class SetLocalRouter {
+            private final ActorRef router;
+
+            public SetLocalRouter(ActorRef router) {
+                Preconditions.checkArgument(router != null, "Router must not be null");
+                this.router = router;
+            }
+
+            public ActorRef getRouter() {
+                return this.router;
+            }
+
+            @Override
+            public String toString() {
+                return "SetLocalRouter{" +
+                        "router=" + router +
+                        '}';
+            }
+        }
+
+        public static class FindRouters {
+            private final RpcRouter.RouteIdentifier<?, ?, ?> routeIdentifier;
+
+            public FindRouters(RpcRouter.RouteIdentifier<?, ?, ?> routeIdentifier) {
+                Preconditions.checkArgument(routeIdentifier != null, "Route must not be null");
+                this.routeIdentifier = routeIdentifier;
+            }
+
+            public RpcRouter.RouteIdentifier<?, ?, ?> getRouteIdentifier() {
+                return routeIdentifier;
+            }
+
+            @Override
+            public String toString() {
+                return "FindRouters{" +
+                        "routeIdentifier=" + routeIdentifier +
+                        '}';
+            }
+        }
+
+        public static class FindRoutersReply {
+            final List<Pair<ActorRef, Long>> routerWithUpdateTime;
+
+            public FindRoutersReply(List<Pair<ActorRef, Long>> routerWithUpdateTime) {
+                Preconditions.checkArgument(routerWithUpdateTime != null, "List of routers found must not be null");
+                this.routerWithUpdateTime = routerWithUpdateTime;
+            }
+
+            public List<Pair<ActorRef, Long>> getRouterWithUpdateTime() {
+                return routerWithUpdateTime;
+            }
+
+            @Override
+            public String toString() {
+                return "FindRoutersReply{" +
+                        "routerWithUpdateTime=" + routerWithUpdateTime +
+                        '}';
+            }
+        }
     }
-  }
 }
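
For orientation, here is a hypothetical usage sketch of the message API defined above (not part of this patch; the actor names, the 1000 ms timeout and the way the route identifier is obtained are assumptions): a broker registers itself, advertises its routes, and later asks for routers.

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import akka.actor.Props;
import akka.pattern.Patterns;
import scala.concurrent.Future;

import java.util.Arrays;

import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
import org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages;
import org.opendaylight.controller.sal.connector.api.RpcRouter;

class RegistryClientSketch {
    void registerAndLookup(ActorSystem system, ActorRef broker,
                           RpcRouter.RouteIdentifier<?, ?, ?> routeId) {
        ActorRef registry = system.actorOf(Props.create(RpcRegistry.class), "rpc-registry");

        // 1. Tell the registry which local broker should be advertised for our routes.
        registry.tell(new Messages.SetLocalRouter(broker), broker);

        // 2. Advertise the rpc(s) this node can serve; they end up in the local bucket.
        registry.tell(new Messages.AddOrUpdateRoutes(
                Arrays.<RpcRouter.RouteIdentifier<?, ?, ?>>asList(routeId)), broker);

        // 3. Ask which routers can serve a given rpc; the reply is a
        //    Messages.FindRoutersReply carrying (ActorRef, update-time) pairs.
        Future<Object> reply = Patterns.ask(registry, new Messages.FindRouters(routeId), 1000);
    }
}
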
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistryOld.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistryOld.java
new file mode 100644 (file)
index 0000000..96c8802
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry;
+
+import akka.actor.ActorSelection;
+import akka.actor.Address;
+import akka.actor.Props;
+import akka.cluster.ClusterEvent;
+import akka.cluster.Member;
+import akka.japi.Creator;
+import org.opendaylight.controller.remote.rpc.AbstractUntypedActor;
+import org.opendaylight.controller.remote.rpc.ActorConstants;
+import org.opendaylight.controller.remote.rpc.messages.AddRoutedRpc;
+import org.opendaylight.controller.remote.rpc.messages.AddRpc;
+import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpc;
+import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpcReply;
+import org.opendaylight.controller.remote.rpc.messages.GetRpc;
+import org.opendaylight.controller.remote.rpc.messages.GetRpcReply;
+import org.opendaylight.controller.remote.rpc.messages.RemoveRoutedRpc;
+import org.opendaylight.controller.remote.rpc.messages.RemoveRpc;
+import org.opendaylight.controller.remote.rpc.messages.RoutingTableData;
+import org.opendaylight.controller.sal.connector.api.RpcRouter;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.collection.JavaConversions;
+
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * This Actor maintains the routing table state and syncs it with other nodes in the cluster.
+ *
+ * A scheduler runs at a fixed interval, picks a random member from the cluster
+ * and sends the current state of the routing table to that member.
+ *
+ * When a routing table data message is received, it gets merged with the local routing table
+ * to keep the latest data.
+ */
+
+public class RpcRegistryOld extends AbstractUntypedActor {
+
+  private static final Logger LOG = LoggerFactory.getLogger(RpcRegistryOld.class);
+  private RoutingTableOld<RpcRouter.RouteIdentifier<?, ?, ?>, String> routingTable;
+  private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
+  private final ClusterWrapper clusterWrapper;
+  private final ScheduledFuture<?> syncScheduler;
+
+  private RpcRegistryOld(ClusterWrapper clusterWrapper){
+    this.routingTable = new RoutingTableOld<>();
+    this.clusterWrapper = clusterWrapper;
+    this.syncScheduler = scheduler.scheduleAtFixedRate(new SendRoutingTable(), 10, 10, TimeUnit.SECONDS);
+  }
+
+  public static Props props(final ClusterWrapper clusterWrapper){
+    return Props.create(new Creator<RpcRegistryOld>(){
+
+      @Override
+      public RpcRegistryOld create() throws Exception {
+        return new RpcRegistryOld(clusterWrapper);
+      }
+    });
+  }
+
+  @Override
+  protected void handleReceive(Object message) throws Exception {
+    LOG.debug("Received message {}", message);
+    if(message instanceof RoutingTableData) {
+      syncRoutingTable((RoutingTableData) message);
+    } else if(message instanceof GetRoutedRpc) {
+      getRoutedRpc((GetRoutedRpc) message);
+    } else if(message instanceof GetRpc) {
+      getRpc((GetRpc) message);
+    } else if(message instanceof AddRpc) {
+      addRpc((AddRpc) message);
+    } else if(message instanceof RemoveRpc) {
+      removeRpc((RemoveRpc) message);
+    } else if(message instanceof AddRoutedRpc) {
+      addRoutedRpc((AddRoutedRpc) message);
+    } else if(message instanceof RemoveRoutedRpc) {
+      removeRoutedRpc((RemoveRoutedRpc) message);
+    }
+  }
+
+  private void getRoutedRpc(GetRoutedRpc rpcMsg){
+    LOG.debug("Get latest routed Rpc location from routing table {}", rpcMsg);
+    String remoteActorPath = routingTable.getLastAddedRoutedRpc(rpcMsg.getRouteId());
+    GetRoutedRpcReply routedRpcReply = new GetRoutedRpcReply(remoteActorPath);
+
+    getSender().tell(routedRpcReply, self());
+  }
+
+  private void getRpc(GetRpc rpcMsg) {
+    LOG.debug("Get global Rpc location from routing table {}", rpcMsg);
+    String remoteActorPath = routingTable.getGlobalRoute(rpcMsg.getRouteId());
+    GetRpcReply rpcReply = new GetRpcReply(remoteActorPath);
+
+    getSender().tell(rpcReply, self());
+  }
+
+  private void addRpc(AddRpc rpcMsg) {
+    LOG.debug("Add Rpc to routing table {}", rpcMsg);
+    routingTable.addGlobalRoute(rpcMsg.getRouteId(), rpcMsg.getActorPath());
+
+    getSender().tell("Success", self());
+  }
+
+  private void removeRpc(RemoveRpc rpcMsg) {
+    LOG.debug("Removing Rpc to routing table {}", rpcMsg);
+    routingTable.removeGlobalRoute(rpcMsg.getRouteId());
+
+    getSender().tell("Success", self());
+  }
+
+  private void addRoutedRpc(AddRoutedRpc rpcMsg) {
+    routingTable.addRoutedRpcs(rpcMsg.getAnnouncements(), rpcMsg.getActorPath());
+    getSender().tell("Success", self());
+  }
+
+  private void removeRoutedRpc(RemoveRoutedRpc rpcMsg) {
+    routingTable.removeRoutes(rpcMsg.getAnnouncements(), rpcMsg.getActorPath());
+    getSender().tell("Success", self());
+  }
+
+  private void syncRoutingTable(RoutingTableData routingTableData) {
+    LOG.debug("Syncing routing table {}", routingTableData);
+
+    Map<RpcRouter.RouteIdentifier<?, ?, ?>, String> newRpcMap = routingTableData.getRpcMap();
+    Set<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds = newRpcMap.keySet();
+    for(RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
+      routingTable.addGlobalRoute(routeId, newRpcMap.get(routeId));
+    }
+
+    Map<RpcRouter.RouteIdentifier<?, ?, ?>, LinkedHashSet<String>> newRoutedRpcMap =
+        routingTableData.getRoutedRpcMap();
+    routeIds = newRoutedRpcMap.keySet();
+
+    for(RpcRouter.RouteIdentifier<?, ?, ?> routeId : routeIds) {
+      Set<String> routeAddresses = newRoutedRpcMap.get(routeId);
+      for(String routeAddress : routeAddresses) {
+        routingTable.addRoutedRpc(routeId, routeAddress);
+      }
+    }
+  }
+
+  private ActorSelection getRandomRegistryActor() {
+    ClusterEvent.CurrentClusterState clusterState = clusterWrapper.getState();
+    ActorSelection actor = null;
+    Set<Member> members = JavaConversions.asJavaSet(clusterState.members());
+    int memberSize = members.size();
+    // Don't select yourself
+    if(memberSize > 1) {
+      Address currentNodeAddress = clusterWrapper.getAddress();
+      int index = new Random().nextInt(memberSize);
+      int i = 0;
+      // keeping previous member, in case when random index member is same as current actor
+      // and current actor member is last in set
+      Member previousMember = null;
+      for(Member member : members){
+        if(i == index-1) {
+          previousMember = member;
+        }
+        if(i == index) {
+          if(!currentNodeAddress.equals(member.address())) {
+            actor = this.context().actorSelection(member.address() + ActorConstants.RPC_REGISTRY_PATH);
+            break;
+          } else if(index < memberSize-1){ // pick the next element in the set
+            index++;
+          }
+        }
+        i++;
+      }
+      if(actor == null && previousMember != null) {
+        actor = this.context().actorSelection(previousMember.address() + ActorConstants.RPC_REGISTRY_PATH);
+      }
+    }
+    return actor;
+  }
+
+  private class SendRoutingTable implements Runnable {
+
+    @Override
+    public void run() {
+      RoutingTableData routingTableData =
+          new RoutingTableData(routingTable.getGlobalRpcMap(), routingTable.getRoutedRpcMap());
+      LOG.debug("Sending routing table for sync {}", routingTableData);
+      ActorSelection actor = getRandomRegistryActor();
+      if(actor != null) {
+        actor.tell(routingTableData, self());
+      }
+    }
+  }
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Bucket.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Bucket.java
new file mode 100644 (file)
index 0000000..f5dfbc5
--- /dev/null
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+
+public interface Bucket<T extends Copier<T>> {
+    public Long getVersion();
+    public T getData();
+    public void setData(T data);
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketImpl.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketImpl.java
new file mode 100644 (file)
index 0000000..3cdd924
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+import java.io.Serializable;
+
+public class BucketImpl<T extends Copier<T>> implements Bucket<T>, Serializable {
+
+    private Long version = System.currentTimeMillis();
+
+    private T data;
+
+    @Override
+    public Long getVersion() {
+        return version;
+    }
+
+    @Override
+    public T getData() {
+        if (this.data == null)
+            return null;
+
+        return data.copy();
+    }
+
+    public void setData(T data){
+        this.version = System.currentTimeMillis()+1;
+        this.data = data;
+    }
+
+    @Override
+    public String toString() {
+        return "BucketImpl{" +
+                "version=" + version +
+                ", data=" + data +
+                '}';
+    }
+}
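
A minimal sketch of how the versioning and defensive-copy behaviour above is intended to be used, with a hypothetical payload type (not part of this patch): setData() advances the version and getData() hands out a copy produced by the payload's copy() method.

import org.opendaylight.controller.remote.rpc.registry.gossip.BucketImpl;
import org.opendaylight.controller.remote.rpc.registry.gossip.Copier;

class CounterData implements Copier<CounterData> {
    int count;

    @Override
    public CounterData copy() {
        CounterData clone = new CounterData();
        clone.count = this.count;   // deep copy is trivial for a single primitive field
        return clone;
    }
}

class BucketSketch {
    public static void main(String[] args) {
        BucketImpl<CounterData> bucket = new BucketImpl<>();
        Long before = bucket.getVersion();

        bucket.setData(new CounterData());   // every write bumps the version
        Long after = bucket.getVersion();

        CounterData copy = bucket.getData(); // a copy, not the stored instance
        copy.count = 42;                     // mutating the copy leaves the bucket untouched

        System.out.println(after > before);  // version moved forward after the write
    }
}
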
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStore.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStore.java
new file mode 100644 (file)
index 0000000..2f634ce
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+import akka.actor.ActorRef;
+import akka.actor.Address;
+import akka.actor.Props;
+import akka.actor.UntypedActor;
+import akka.cluster.Cluster;
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBuckets;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetAllBucketsReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersions;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersionsReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembers;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucket;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetLocalBucketReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateBucket;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
+
+/**
+ * A store that syncs its data across nodes in the cluster.
+ * It maintains a {@link org.opendaylight.controller.remote.rpc.registry.gossip.Bucket} per node. Buckets are versioned.
+ * A node can write ONLY to its bucket. This way, write conflicts are avoided.
+ * <p>
+ * Buckets are sync'ed across nodes using Gossip protocol (http://en.wikipedia.org/wiki/Gossip_protocol)<p>
+ * This store uses a {@link org.opendaylight.controller.remote.rpc.registry.gossip.Gossiper}.
+ *
+ */
+public class BucketStore extends UntypedActor {
+
+    final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
+
+    /**
+     * Bucket owned by the node
+     */
+    private BucketImpl localBucket = new BucketImpl();
+
+    /**
+     * Buckets owned by other known nodes in the cluster
+     */
+    private ConcurrentMap<Address, Bucket> remoteBuckets = new ConcurrentHashMap<>();
+
+    /**
+     * Bucket version for every known node in the cluster including this node
+     */
+    private ConcurrentMap<Address, Long> versions = new ConcurrentHashMap<>();
+
+    /**
+     * Cluster address for this node
+     */
+    private final Address selfAddress = Cluster.get(getContext().system()).selfAddress();
+
+    /**
+     * Our private gossiper
+     */
+    private ActorRef gossiper;
+
+    public BucketStore(){
+        gossiper = getContext().actorOf(Props.create(Gossiper.class), "gossiper");
+    }
+
+    /**
+     * This constructor is useful for testing.
+     * TODO: Pass Props instead of ActorRef
+     *
+     * @param gossiper
+     */
+    public BucketStore(ActorRef gossiper){
+        this.gossiper = gossiper;
+    }
+
+    @Override
+    public void onReceive(Object message) throws Exception {
+
+        log.debug("Received message: node[{}], message[{}]", selfAddress, message);
+
+        if (message instanceof UpdateBucket)
+            receiveUpdateBucket(((UpdateBucket) message).getBucket());
+
+        else if (message instanceof GetAllBuckets)
+            receiveGetAllBucket();
+
+        else if (message instanceof GetLocalBucket)
+            receiveGetLocalBucket();
+
+        else if (message instanceof GetBucketsByMembers)
+            receiveGetBucketsByMembers(((GetBucketsByMembers) message).getMembers());
+
+        else if (message instanceof GetBucketVersions)
+            receiveGetBucketVersions();
+
+        else if (message instanceof UpdateRemoteBuckets)
+            receiveUpdateRemoteBuckets(((UpdateRemoteBuckets) message).getBuckets());
+
+        else {
+            log.debug("Unhandled message [{}]", message);
+            unhandled(message);
+        }
+
+    }
+
+    /**
+     * Returns a copy of bucket owned by this node
+     */
+    private void receiveGetLocalBucket() {
+        final ActorRef sender = getSender();
+        GetLocalBucketReply reply = new GetLocalBucketReply(localBucket);
+        sender.tell(reply, getSelf());
+    }
+
+    /**
+     * Updates the bucket owned by this node
+     *
+     * @param updatedBucket
+     */
+    void receiveUpdateBucket(Bucket updatedBucket){
+
+        localBucket = (BucketImpl) updatedBucket;
+        versions.put(selfAddress, localBucket.getVersion());
+    }
+
+    /**
+     * Returns all the buckets this node knows about, self-owned + remote
+     */
+    void receiveGetAllBucket(){
+        final ActorRef sender = getSender();
+        sender.tell(new GetAllBucketsReply(getAllBuckets()), getSelf());
+    }
+
+    /**
+     * Helper to collect all known buckets
+     *
+     * @return self owned + remote buckets
+     */
+    Map<Address, Bucket> getAllBuckets(){
+        Map<Address, Bucket> all = new HashMap<>(remoteBuckets.size() + 1);
+
+        //first add the local bucket
+        all.put(selfAddress, localBucket);
+
+        //then get all remote buckets
+        all.putAll(remoteBuckets);
+
+        return all;
+    }
+
+    /**
+     * Returns buckets for requested members that this node knows about
+     *
+     * @param members requested members
+     */
+    void receiveGetBucketsByMembers(Set<Address> members){
+        final ActorRef sender = getSender();
+        Map<Address, Bucket> buckets = getBucketsByMembers(members);
+        sender.tell(new GetBucketsByMembersReply(buckets), getSelf());
+    }
+
+    /**
+     * Helper to collect buckets for requested members
+     *
+     * @param members requested members
+     * @return buckets for requested members
+     */
+    Map<Address, Bucket> getBucketsByMembers(Set<Address> members) {
+        Map<Address, Bucket> buckets = new HashMap<>();
+
+        //first add the local bucket if asked
+        if (members.contains(selfAddress))
+            buckets.put(selfAddress, localBucket);
+
+        //then get buckets for requested remote nodes
+        for (Address address : members){
+            if (remoteBuckets.containsKey(address))
+                buckets.put(address, remoteBuckets.get(address));
+        }
+
+        return buckets;
+    }
+
+    /**
+     * Returns versions for all buckets known
+     */
+    void receiveGetBucketVersions(){
+        final ActorRef sender = getSender();
+        GetBucketVersionsReply reply = new GetBucketVersionsReply(versions);
+        sender.tell(reply, getSelf());
+    }
+
+    /**
+     * Update local copy of remote buckets where local copy's version is older
+     *
+     * @param receivedBuckets buckets sent by remote
+     *                        {@link org.opendaylight.controller.remote.rpc.registry.gossip.Gossiper}
+     */
+    void receiveUpdateRemoteBuckets(Map<Address, Bucket> receivedBuckets){
+
+        if (receivedBuckets == null || receivedBuckets.isEmpty())
+            return; //nothing to do
+
+        //Remote can't update this node's own bucket
+        receivedBuckets.remove(selfAddress);
+
+        for (Map.Entry<Address, Bucket> entry : receivedBuckets.entrySet()){
+
+            Long localVersion = versions.get(entry.getKey());
+            if (localVersion == null) localVersion = -1L;
+
+            Bucket receivedBucket = entry.getValue();
+
+            if (receivedBucket == null)
+                continue;
+
+            Long remoteVersion = receivedBucket.getVersion();
+            if (remoteVersion == null) remoteVersion = -1L;
+
+            //update only if remote version is newer
+            if ( remoteVersion > localVersion ) {
+                remoteBuckets.put(entry.getKey(), receivedBucket);
+                versions.put(entry.getKey(), remoteVersion);
+            }
+        }
+
+        log.debug("State after update - Local Bucket [{}], Remote Buckets [{}]", localBucket, remoteBuckets);
+    }
+
+    ///
+    ///Getter Setters
+    ///
+
+    BucketImpl getLocalBucket() {
+        return localBucket;
+    }
+
+    void setLocalBucket(BucketImpl localBucket) {
+        this.localBucket = localBucket;
+    }
+
+    ConcurrentMap<Address, Bucket> getRemoteBuckets() {
+        return remoteBuckets;
+    }
+
+    void setRemoteBuckets(ConcurrentMap<Address, Bucket> remoteBuckets) {
+        this.remoteBuckets = remoteBuckets;
+    }
+
+    ConcurrentMap<Address, Long> getVersions() {
+        return versions;
+    }
+
+    void setVersions(ConcurrentMap<Address, Long> versions) {
+        this.versions = versions;
+    }
+
+    Address getSelfAddress() {
+        return selfAddress;
+    }
+}
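
The heart of receiveUpdateRemoteBuckets above is a "strictly newer version wins" merge. A standalone sketch of that rule with simplified types (illustrative only, not part of this patch):

import java.util.HashMap;
import java.util.Map;

class VersionMergeSketch {
    final Map<String, Long> versions = new HashMap<>();        // node -> last version seen
    final Map<String, String> remoteBuckets = new HashMap<>(); // node -> bucket payload (simplified to String)

    void merge(String node, long receivedVersion, String receivedPayload) {
        Long localVersion = versions.get(node);
        if (localVersion == null) {
            localVersion = -1L;                                // never heard from this node before
        }
        if (receivedVersion > localVersion) {                  // only strictly newer data wins
            remoteBuckets.put(node, receivedPayload);
            versions.put(node, receivedVersion);
        }
    }
}
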
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Copier.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Copier.java
new file mode 100644 (file)
index 0000000..45279eb
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+/**
+ * Type of data that goes in {@link org.opendaylight.controller.remote.rpc.registry.gossip.Bucket}.
+ * The implementers should do deep cloning in copy() method.
+ */
+public interface Copier<T> {
+    public T copy();
+}
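
The copy() contract matters most when the payload carries mutable state. A hypothetical implementer (not part of this patch) that clones its backing map instead of sharing the reference:

import java.util.HashMap;
import java.util.Map;

import org.opendaylight.controller.remote.rpc.registry.gossip.Copier;

class RouteCache implements Copier<RouteCache> {
    final Map<String, String> routes = new HashMap<>();

    @Override
    public RouteCache copy() {
        RouteCache clone = new RouteCache();
        clone.routes.putAll(this.routes);   // copy the map itself; the String entries are immutable
        return clone;
    }
}
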
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Gossiper.java
new file mode 100644 (file)
index 0000000..2320789
--- /dev/null
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.Address;
+import akka.actor.Cancellable;
+import akka.actor.UntypedActor;
+import akka.cluster.Cluster;
+import akka.cluster.ClusterEvent;
+import akka.cluster.Member;
+import akka.dispatch.Mapper;
+import akka.event.Logging;
+import akka.event.LoggingAdapter;
+import akka.pattern.Patterns;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersions;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketVersionsReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembers;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.GetBucketsByMembersReply;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.UpdateRemoteBuckets;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipTick;
+
+/**
+ * Gossiper that syncs bucket store across nodes in the cluster.
+ * <p/>
+ * It keeps a local scheduler that periodically sends Gossip ticks to
+ * itself to send bucket store's bucket versions to a randomly selected remote
+ * gossiper.
+ * <p/>
+ * When bucket versions are received from a remote gossiper, they are compared
+ * with the bucket store's versions. Whichever buckets are newer
+ * locally are sent to the remote gossiper. If any bucket is older in the bucket store,
+ * a gossip status is sent to the remote gossiper so that it can send the newer buckets.
+ * <p/>
+ * When a bucket is received from a remote gossiper, it is sent to the bucket store
+ * for update.
+ *
+ */
+
+public class Gossiper extends UntypedActor {
+
+    final LoggingAdapter log = Logging.getLogger(getContext().system(), this);
+
+    Cluster cluster = Cluster.get(getContext().system());
+
+    /**
+     * ActorSystem's address for the current cluster node.
+     */
+    private Address selfAddress = cluster.selfAddress();
+
+    /**
+     * All known cluster members
+     */
+    private List<Address> clusterMembers = new ArrayList<>();
+
+    private Cancellable gossipTask;
+
+    private Boolean autoStartGossipTicks = true;
+
+    public Gossiper(){}
+
+    /**
+     * Helpful for testing
+     * @param autoStartGossipTicks used for turning off gossip ticks during testing.
+     *                             Gossip tick can be manually sent.
+     */
+    public Gossiper(Boolean autoStartGossipTicks){
+        this.autoStartGossipTicks = autoStartGossipTicks;
+    }
+
+    @Override
+    public void preStart(){
+
+        cluster.subscribe(getSelf(),
+                          ClusterEvent.initialStateAsEvents(),
+                          ClusterEvent.MemberEvent.class,
+                          ClusterEvent.UnreachableMember.class);
+
+        if (autoStartGossipTicks) {
+            gossipTask = getContext().system().scheduler().schedule(
+                    new FiniteDuration(1, TimeUnit.SECONDS),        //initial delay
+                    new FiniteDuration(500, TimeUnit.MILLISECONDS), //interval
+                    getSelf(),                                       //target
+                    new Messages.GossiperMessages.GossipTick(),      //message
+                    getContext().dispatcher(),                       //execution context
+                    getSelf()                                        //sender
+            );
+        }
+    }
+
+    @Override
+    public void postStop(){
+        if (cluster != null)
+            cluster.unsubscribe(getSelf());
+        if (gossipTask != null)
+            gossipTask.cancel();
+    }
+
+    @Override
+    public void onReceive(Object message) throws Exception {
+
+        log.debug("Received message: node[{}], message[{}]", selfAddress, message);
+
+        //Usually sent by self via the gossip task defined above, but that is not enforced.
+        //These ticks can be sent by another actor as well, which is especially useful while testing
+        if (message instanceof GossipTick)
+            receiveGossipTick();
+
+        //Message from remote gossiper with its bucket versions
+        else if (message instanceof GossipStatus)
+            receiveGossipStatus((GossipStatus) message);
+
+        //Message from remote gossiper with buckets. This is usually in response to GossipStatus message
+        //The contained buckets are newer as determined by the remote gossiper by comparing the GossipStatus
+        //message with its local versions
+        else if (message instanceof GossipEnvelope)
+            receiveGossip((GossipEnvelope) message);
+
+        else if (message instanceof ClusterEvent.MemberUp) {
+            receiveMemberUp(((ClusterEvent.MemberUp) message).member());
+
+        } else if (message instanceof ClusterEvent.MemberRemoved) {
+            receiveMemberRemoveOrUnreachable(((ClusterEvent.MemberRemoved) message).member());
+
+        } else if ( message instanceof ClusterEvent.UnreachableMember){
+            receiveMemberRemoveOrUnreachable(((ClusterEvent.UnreachableMember) message).member());
+
+        } else
+            unhandled(message);
+    }
+
+    /**
+     * Remove member from local copy of member list. If member down is self, then stop the actor
+     *
+     * @param member who went down
+     */
+    void receiveMemberRemoveOrUnreachable(Member member) {
+        //if it's self, then stop itself
+        if (selfAddress.equals(member.address())){
+            getContext().stop(getSelf());
+            return;
+        }
+
+        clusterMembers.remove(member.address());
+        log.debug("Removed member [{}], Active member list [{}]", member.address(), clusterMembers);
+    }
+
+    /**
+     * Adds the member to the local copy of the member list if it is not already present
+     * @param member the member that came up
+     */
+    void receiveMemberUp(Member member) {
+
+        if (selfAddress.equals(member.address()))
+            return; //ignore up notification for self
+
+        if (!clusterMembers.contains(member.address()))
+            clusterMembers.add(member.address());
+
+        log.debug("Added member [{}], Active member list [{}]", member.address(), clusterMembers);
+    }
+
+    /**
+     * Sends Gossip status to other members in the cluster. <br/>
+     * 1. If there are no members, ignore the tick. <br/>
+     * 2. If there is only 1 member, send gossip status (bucket versions) to it. <br/>
+     * 3. If there is more than one member, randomly pick one and send gossip status (bucket versions) to it.
+     */
+    void receiveGossipTick(){
+        if (clusterMembers.size() == 0) return; //no members to send gossip status to
+
+        Address remoteMemberToGossipTo = null;
+
+        if (clusterMembers.size() == 1)
+            remoteMemberToGossipTo = clusterMembers.get(0);
+        else {
+            Integer randomIndex = ThreadLocalRandom.current().nextInt(0, clusterMembers.size());
+            remoteMemberToGossipTo = clusterMembers.get(randomIndex);
+        }
+
+        log.debug("Gossiping to [{}]", remoteMemberToGossipTo);
+        getLocalStatusAndSendTo(remoteMemberToGossipTo);
+    }
+
+    /**
+     * Process gossip status received from a remote gossiper. Remote versions are compared with
+     * the local copy. <p>
+     *
+     * For each bucket
+     * <ul>
+     *  <li>If local copy is newer, the newer buckets are sent in GossipEnvelope to remote</li>
+     *  <li>If local is older, GossipStatus is sent to remote so that it can reply with GossipEnvelope</li>
+     *  <li>If both are same, noop</li>
+     * </ul>
+     *
+     * @param status bucket versions from a remote member
+     */
+    void receiveGossipStatus(GossipStatus status){
+        //Don't accept messages from non-members
+        if (!clusterMembers.contains(status.from()))
+            return;
+
+        final ActorRef sender = getSender();
+        Future<Object> futureReply = Patterns.ask(getContext().parent(), new GetBucketVersions(), 1000);
+        futureReply.map(getMapperToProcessRemoteStatus(sender, status), getContext().dispatcher());
+
+    }
+
+    /**
+     * Sends the received buckets in the envelope to the parent Bucket store.
+     *
+     * @param envelope contains buckets from a remote gossiper
+     */
+    void receiveGossip(GossipEnvelope envelope){
+        //TODO: Add more validations
+        if (!selfAddress.equals(envelope.to())) {
+            log.debug("Ignoring message intended for someone else. From [{}] to [{}]", envelope.from(), envelope.to());
+            return;
+        }
+
+        updateRemoteBuckets(envelope.getBuckets());
+
+    }
+
+    /**
+     * Helper to send received buckets to bucket store
+     *
+     * @param buckets
+     */
+    void updateRemoteBuckets(Map<Address, Bucket> buckets) {
+
+        UpdateRemoteBuckets updateRemoteBuckets = new UpdateRemoteBuckets(buckets);
+        getContext().parent().tell(updateRemoteBuckets, getSelf());
+    }
+
+    /**
+     * Gets the buckets from bucket store for the given node addresses and sends them to remote gossiper
+     *
+     * @param remote     remote node to send Buckets to
+     * @param addresses  node addresses whose buckets need to be sent
+     */
+    void sendGossipTo(final ActorRef remote, final Set<Address> addresses){
+
+        Future<Object> futureReply = Patterns.ask(getContext().parent(), new GetBucketsByMembers(addresses), 1000);
+        futureReply.map(getMapperToSendGossip(remote), getContext().dispatcher());
+    }
+
+    /**
+     * Gets bucket versions from bucket store and sends to the supplied address
+     *
+     * @param remoteActorSystemAddress remote gossiper to send to
+     */
+    void getLocalStatusAndSendTo(Address remoteActorSystemAddress){
+
+        //Get local status from bucket store and send to remote
+        Future<Object> futureReply = Patterns.ask(getContext().parent(), new GetBucketVersions(), 1000);
+        ActorSelection remoteRef = getContext().system().actorSelection(
+                remoteActorSystemAddress.toString() + getSelf().path().toStringWithoutAddress());
+
+        log.debug("Sending bucket versions to [{}]", remoteRef);
+
+        futureReply.map(getMapperToSendLocalStatus(remoteRef), getContext().dispatcher());
+
+    }
+
+    /**
+     * Helper to send bucket versions received from local store
+     * @param remote        remote gossiper to send versions to
+     * @param localVersions bucket versions received from local store
+     */
+    void sendGossipStatusTo(ActorRef remote, Map<Address, Long> localVersions){
+
+        GossipStatus status = new GossipStatus(selfAddress, localVersions);
+        remote.tell(status, getSelf());
+    }
+
+    void sendGossipStatusTo(ActorSelection remote, Map<Address, Long> localVersions){
+
+        GossipStatus status = new GossipStatus(selfAddress, localVersions);
+        remote.tell(status, getSelf());
+    }
+
+    ///
+    /// Private factories to create mappers
+    ///
+
+    private Mapper<Object, Void> getMapperToSendLocalStatus(final ActorSelection remote){
+
+        return new Mapper<Object, Void>() {
+            @Override
+            public Void apply(Object replyMessage) {
+                if (replyMessage instanceof GetBucketVersionsReply) {
+                    GetBucketVersionsReply reply = (GetBucketVersionsReply) replyMessage;
+                    Map<Address, Long> localVersions = reply.getVersions();
+
+                    sendGossipStatusTo(remote, localVersions);
+
+                }
+                return null;
+            }
+        };
+    }
+
+    /**
+     * Process bucket versions received from
+     * {@link org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore}.
+     * Then this method compares remote bucket versions with local bucket versions.
+     * <ul>
+     *     <li>The buckets that are newer locally, send
+     *     {@link org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope}
+     *     to remote
+     *     <li>The buckets that are older locally, send
+     *     {@link org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus}
+     *     to remote so that remote sends a GossipEnvelope.
+     * </ul>
+     *
+     * @param sender the remote member
+     * @param status bucket versions from a remote member
+     * @return a {@link akka.dispatch.Mapper} that gets evaluated in future
+     *
+     */
+    private Mapper<Object, Void> getMapperToProcessRemoteStatus(final ActorRef sender, final GossipStatus status){
+
+        final Map<Address, Long> remoteVersions = status.getVersions();
+
+        return new Mapper<Object, Void>() {
+            @Override
+            public Void apply(Object replyMessage) {
+                if (replyMessage instanceof GetBucketVersionsReply) {
+                    GetBucketVersionsReply reply = (GetBucketVersionsReply) replyMessage;
+                    Map<Address, Long> localVersions = reply.getVersions();
+
+                    //diff between remote list and local
+                    Set<Address> localIsOlder = new HashSet<>();
+                    localIsOlder.addAll(remoteVersions.keySet());
+                    localIsOlder.removeAll(localVersions.keySet());
+
+                    //diff between local list and remote
+                    Set<Address> localIsNewer = new HashSet<>();
+                    localIsNewer.addAll(localVersions.keySet());
+                    localIsNewer.removeAll(remoteVersions.keySet());
+
+
+                    for (Address address : remoteVersions.keySet()){
+
+                        if (localVersions.get(address) == null || remoteVersions.get(address) == null)
+                            continue; //this condition is taken care of by above diffs
+                        if (localVersions.get(address) <  remoteVersions.get(address))
+                            localIsOlder.add(address);
+                        else if (localVersions.get(address) > remoteVersions.get(address))
+                            localIsNewer.add(address);
+                        else
+                            continue;
+                    }
+
+                    if (!localIsOlder.isEmpty())
+                        sendGossipStatusTo(sender, localVersions );
+
+                    if (!localIsNewer.isEmpty())
+                        sendGossipTo(sender, localIsNewer);//send newer buckets to remote
+
+                }
+                return null;
+            }
+        };
+    }
+
+    /**
+     * Processes the reply from {@link org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore}
+     * that contains the requested {@link org.opendaylight.controller.remote.rpc.registry.gossip.Bucket}s.
+     * The buckets are sent to the remote member encapsulated in a
+     * {@link org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope}.
+     *
+     * @param sender the remote member that sent the
+     *               {@link org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus}
+     *               in reply to which the buckets are sent back
+     * @return a {@link akka.dispatch.Mapper} that is evaluated asynchronously when the local store replies
+     *
+     */
+    private Mapper<Object, Void> getMapperToSendGossip(final ActorRef sender) {
+
+        return new Mapper<Object, Void>() {
+            @Override
+            public Void apply(Object msg) {
+                if (msg instanceof GetBucketsByMembersReply) {
+                    Map<Address, Bucket> buckets = ((GetBucketsByMembersReply) msg).getBuckets();
+                    log.debug("Buckets to send from {}: {}", selfAddress, buckets);
+                    GossipEnvelope envelope = new GossipEnvelope(selfAddress, sender.path().address(), buckets);
+                    sender.tell(envelope, getSelf());
+                }
+                return null;
+            }
+        };
+    }
+
+    ///
+    /// Getters and setters
+    ///
+    List<Address> getClusterMembers() {
+        return clusterMembers;
+    }
+
+    void setClusterMembers(List<Address> clusterMembers) {
+        this.clusterMembers = clusterMembers;
+    }
+
+    Address getSelfAddress() {
+        return selfAddress;
+    }
+
+    void setSelfAddress(Address selfAddress) {
+        this.selfAddress = selfAddress;
+    }
+}
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Messages.java b/opendaylight/md-sal/sal-remoterpc-connector/src/main/java/org/opendaylight/controller/remote/rpc/registry/gossip/Messages.java
new file mode 100644 (file)
index 0000000..bf8b202
--- /dev/null
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+import akka.actor.Address;
+import com.google.common.base.Preconditions;
+
+import java.io.Serializable;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.ContainsBucketVersions;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.BucketStoreMessages.ContainsBuckets;
+
+
+/**
+ * These messages are used by {@link org.opendaylight.controller.remote.rpc.registry.gossip.BucketStore} and
+ * {@link org.opendaylight.controller.remote.rpc.registry.gossip.Gossiper} actors.
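+ * <p>
+ * A minimal usage sketch (illustrative only; assumes an {@code ActorRef bucketStore} and an
+ * actor context are in scope, as in the Gossiper above):
+ * <pre>{@code
+ * Future<Object> reply = Patterns.ask(bucketStore, new BucketStoreMessages.GetBucketVersions(), 1000);
+ * // the store answers with a BucketStoreMessages.GetBucketVersionsReply carrying a Map<Address, Long>
+ * }</pre>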
+ */
+public class Messages {
+
+    public static class BucketStoreMessages{
+
+        public static class GetLocalBucket implements Serializable{}
+
+        public static class ContainsBucket implements Serializable {
+            final private Bucket bucket;
+
+            public ContainsBucket(Bucket bucket){
+                Preconditions.checkArgument(bucket != null, "bucket can not be null");
+                this.bucket = bucket;
+            }
+
+            public Bucket getBucket(){
+                return bucket;
+            }
+
+        }
+
+        public static class UpdateBucket extends ContainsBucket implements Serializable {
+            public UpdateBucket(Bucket bucket){
+                super(bucket);
+            }
+        }
+
+        public static class GetLocalBucketReply extends ContainsBucket implements Serializable {
+            public GetLocalBucketReply(Bucket bucket){
+                super(bucket);
+            }
+        }
+
+        public static class GetAllBuckets implements Serializable{}
+
+        public static class GetBucketsByMembers implements Serializable{
+            private Set<Address> members;
+
+            public GetBucketsByMembers(Set<Address> members){
+                Preconditions.checkArgument(members != null, "members can not be null");
+                this.members = members;
+            }
+
+            public Set<Address> getMembers() {
+                return new HashSet<>(members);
+            }
+        }
+
+        public static class ContainsBuckets implements Serializable{
+            private Map<Address, Bucket> buckets;
+
+            public ContainsBuckets(Map<Address, Bucket> buckets){
+                Preconditions.checkArgument(buckets != null, "buckets can not be null");
+                this.buckets = buckets;
+            }
+
+            public Map<Address, Bucket> getBuckets() {
+                Map<Address, Bucket> copy = new HashMap<>(buckets.size());
+
+                for (Map.Entry<Address, Bucket> entry : buckets.entrySet()){
+                    //ignore null entries
+                    if ( (entry.getKey() == null) || (entry.getValue() == null) )
+                        continue;
+                    copy.put(entry.getKey(), entry.getValue());
+                }
+                return copy;
+            }
+        }
+
+        public static class GetAllBucketsReply extends ContainsBuckets implements Serializable{
+            public GetAllBucketsReply(Map<Address, Bucket> buckets) {
+                super(buckets);
+            }
+        }
+
+        public static class GetBucketsByMembersReply extends ContainsBuckets implements Serializable{
+            public GetBucketsByMembersReply(Map<Address, Bucket> buckets) {
+                super(buckets);
+            }
+        }
+
+        public static class GetBucketVersions implements Serializable{}
+
+        public static class ContainsBucketVersions implements Serializable{
+            Map<Address, Long> versions;
+
+            public ContainsBucketVersions(Map<Address, Long> versions) {
+                Preconditions.checkArgument(versions != null, "versions can not be null");
+
+                this.versions = versions;
+            }
+
+            public Map<Address, Long> getVersions() {
+                return Collections.unmodifiableMap(versions);
+            }
+
+        }
+
+        public static class GetBucketVersionsReply extends ContainsBucketVersions implements Serializable{
+            public GetBucketVersionsReply(Map<Address, Long> versions) {
+                super(versions);
+            }
+        }
+
+        public static class UpdateRemoteBuckets extends ContainsBuckets implements Serializable{
+            public UpdateRemoteBuckets(Map<Address, Bucket> buckets) {
+                super(buckets);
+            }
+        }
+    }
+
+    public static class GossiperMessages{
+        public static class Tick implements Serializable {}
+
+        public static final class GossipTick extends Tick {}
+
+        public static final class GossipStatus extends ContainsBucketVersions implements Serializable{
+            private Address from;
+
+            public GossipStatus(Address from, Map<Address, Long> versions) {
+                super(versions);
+                this.from = from;
+            }
+
+            public Address from() {
+                return from;
+            }
+        }
+
+        public static final class GossipEnvelope extends ContainsBuckets implements Serializable {
+            private final Address from;
+            private final Address to;
+
+            public GossipEnvelope(Address from, Address to, Map<Address, Bucket> buckets) {
+                super(buckets);
+                Preconditions.checkArgument(to != null, "Recipient of message must not be null");
+                this.to = to;
+                this.from = from;
+            }
+
+            public Address from() {
+                return from;
+            }
+
+            public Address to() {
+                return to;
+            }
+        }
+    }
+}
index 392c1e637d848e1ccc47d43b3a951011a2e25a72..55aa1d6c871d252d0a89ce1772fd078e45cac101 100644 (file)
@@ -25,7 +25,7 @@ import org.opendaylight.controller.remote.rpc.messages.InvokeRoutedRpc;
 import org.opendaylight.controller.remote.rpc.messages.InvokeRpc;
 import org.opendaylight.controller.remote.rpc.messages.RpcResponse;
 import org.opendaylight.controller.remote.rpc.registry.ClusterWrapper;
-import org.opendaylight.controller.remote.rpc.registry.RpcRegistry;
+import org.opendaylight.controller.remote.rpc.registry.RpcRegistryOld;
 import org.opendaylight.controller.sal.common.util.Rpcs;
 import org.opendaylight.controller.sal.connector.api.RpcRouter;
 import org.opendaylight.controller.sal.core.api.Broker;
@@ -69,7 +69,7 @@ public class RpcBrokerTest {
   @Test
   public void testInvokeRpcError() throws URISyntaxException {
     new JavaTestKit(system) {{
-      ActorRef rpcRegistry = system.actorOf(RpcRegistry.props(Mockito.mock(ClusterWrapper.class)));
+      ActorRef rpcRegistry = system.actorOf(RpcRegistryOld.props(Mockito.mock(ClusterWrapper.class)));
       Broker.ProviderSession brokerSession = Mockito.mock(Broker.ProviderSession.class);
       SchemaContext schemaContext = mock(SchemaContext.class);
       ActorRef rpcBroker = system.actorOf(RpcBroker.props(brokerSession, rpcRegistry, schemaContext));
@@ -100,7 +100,7 @@ public class RpcBrokerTest {
   @Test
   public void testInvokeRpc() throws URISyntaxException {
     new JavaTestKit(system) {{
-      ActorRef rpcRegistry = system.actorOf(RpcRegistry.props(mock(ClusterWrapper.class)));
+      ActorRef rpcRegistry = system.actorOf(RpcRegistryOld.props(mock(ClusterWrapper.class)));
       Broker.ProviderSession brokerSession = mock(Broker.ProviderSession.class);
       SchemaContext schemaContext = mock(SchemaContext.class);
       ActorRef rpcBroker = system.actorOf(RpcBroker.props(brokerSession, rpcRegistry, schemaContext));
@@ -141,7 +141,7 @@ public class RpcBrokerTest {
   @Test
   public void testInvokeRoutedRpcError() throws URISyntaxException {
     new JavaTestKit(system) {{
-      ActorRef rpcRegistry = system.actorOf(RpcRegistry.props(Mockito.mock(ClusterWrapper.class)));
+      ActorRef rpcRegistry = system.actorOf(RpcRegistryOld.props(Mockito.mock(ClusterWrapper.class)));
       Broker.ProviderSession brokerSession = Mockito.mock(Broker.ProviderSession.class);
       SchemaContext schemaContext = mock(SchemaContext.class);
       ActorRef rpcBroker = system.actorOf(RpcBroker.props(brokerSession, rpcRegistry, schemaContext));
@@ -172,7 +172,7 @@ public class RpcBrokerTest {
   @Test
   public void testInvokeRoutedRpc() throws URISyntaxException {
     new JavaTestKit(system) {{
-      ActorRef rpcRegistry = system.actorOf(RpcRegistry.props(mock(ClusterWrapper.class)));
+      ActorRef rpcRegistry = system.actorOf(RpcRegistryOld.props(mock(ClusterWrapper.class)));
       Broker.ProviderSession brokerSession = mock(Broker.ProviderSession.class);
       SchemaContext schemaContext = mock(SchemaContext.class);
       ActorRef rpcBroker = system.actorOf(RpcBroker.props(brokerSession, rpcRegistry, schemaContext));
@@ -19,10 +19,10 @@ import java.net.URISyntaxException;
 import java.util.HashSet;
 import java.util.Set;
 
-public class RoutingTableTest {
+public class RoutingTableOldTest {
 
-  private RoutingTable<RpcRouter.RouteIdentifier<?, ?, ?>, String> routingTable =
-      new RoutingTable<>();
+  private RoutingTableOld<RpcRouter.RouteIdentifier<?, ?, ?>, String> routingTable =
+      new RoutingTableOld<>();
 
   @Test
   public void addGlobalRouteNullRouteIdTest() {
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistryOldTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/RpcRegistryOldTest.java
new file mode 100644 (file)
index 0000000..0f711b4
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.remote.rpc.registry;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.testkit.JavaTestKit;
+import junit.framework.Assert;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.opendaylight.controller.remote.rpc.RouteIdentifierImpl;
+import org.opendaylight.controller.remote.rpc.messages.AddRoutedRpc;
+import org.opendaylight.controller.remote.rpc.messages.AddRpc;
+import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpc;
+import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpcReply;
+import org.opendaylight.controller.remote.rpc.messages.GetRpc;
+import org.opendaylight.controller.remote.rpc.messages.GetRpcReply;
+import org.opendaylight.controller.remote.rpc.messages.RemoveRoutedRpc;
+import org.opendaylight.controller.remote.rpc.messages.RemoveRpc;
+import org.opendaylight.controller.sal.connector.api.RpcRouter;
+import org.opendaylight.yangtools.yang.common.QName;
+
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.HashSet;
+import java.util.Set;
+
+public class RpcRegistryOldTest {
+
+  static ActorSystem system;
+
+
+  @BeforeClass
+  public static void setup() {
+    system = ActorSystem.create();
+  }
+
+  @AfterClass
+  public static void teardown() {
+    JavaTestKit.shutdownActorSystem(system);
+    system = null;
+  }
+
+  /**
+   This test adds, reads and removes an entry in the global rpc registry
+   */
+  @Test
+  public void testGlobalRpc() throws URISyntaxException {
+    new JavaTestKit(system) {{
+      ActorRef rpcRegistry = system.actorOf(RpcRegistryOld.props(Mockito.mock(ClusterWrapper.class)));
+      QName type = new QName(new URI("actor1"), "actor1");
+      RouteIdentifierImpl routeId = new RouteIdentifierImpl(null, type, null);
+      final String route = "actor1";
+
+      AddRpc rpcMsg = new AddRpc(routeId, route);
+      rpcRegistry.tell(rpcMsg, getRef());
+      expectMsgEquals(duration("2 second"), "Success");
+
+      GetRpc getRpc = new GetRpc(routeId);
+      rpcRegistry.tell(getRpc, getRef());
+
+      Boolean getMsg = new ExpectMsg<Boolean>("GetRpcReply") {
+        protected Boolean match(Object in) {
+          if (in instanceof GetRpcReply) {
+            GetRpcReply reply = (GetRpcReply)in;
+            return route.equals(reply.getRoutePath());
+          } else {
+            throw noMatch();
+          }
+        }
+      }.get(); // this extracts the received message
+
+      Assert.assertTrue(getMsg);
+
+      RemoveRpc removeMsg = new RemoveRpc(routeId);
+      rpcRegistry.tell(removeMsg, getRef());
+      expectMsgEquals(duration("2 second"), "Success");
+
+      rpcRegistry.tell(getRpc, getRef());
+
+      Boolean getNullMsg = new ExpectMsg<Boolean>("GetRpcReply") {
+        protected Boolean match(Object in) {
+          if (in instanceof GetRpcReply) {
+            GetRpcReply reply = (GetRpcReply)in;
+            return reply.getRoutePath() == null;
+          } else {
+            throw noMatch();
+          }
+        }
+      }.get();
+      Assert.assertTrue(getNullMsg);
+    }};
+
+  }
+
+  /**
+   This test adds, reads and removes an entry in the routed rpc registry
+   */
+  @Test
+  public void testRoutedRpc() throws URISyntaxException {
+    new JavaTestKit(system) {{
+      ActorRef rpcRegistry = system.actorOf(RpcRegistryOld.props(Mockito.mock(ClusterWrapper.class)));
+      QName type = new QName(new URI("actor1"), "actor1");
+      RouteIdentifierImpl routeId = new RouteIdentifierImpl(null, type, null);
+      final String route = "actor1";
+
+      Set<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds = new HashSet<>();
+      routeIds.add(routeId);
+
+      AddRoutedRpc rpcMsg = new AddRoutedRpc(routeIds, route);
+      rpcRegistry.tell(rpcMsg, getRef());
+      expectMsgEquals(duration("2 second"), "Success");
+
+      GetRoutedRpc getRpc = new GetRoutedRpc(routeId);
+      rpcRegistry.tell(getRpc, getRef());
+
+      Boolean getMsg = new ExpectMsg<Boolean>("GetRoutedRpcReply") {
+        protected Boolean match(Object in) {
+          if (in instanceof GetRoutedRpcReply) {
+            GetRoutedRpcReply reply = (GetRoutedRpcReply)in;
+            return route.equals(reply.getRoutePath());
+          } else {
+            throw noMatch();
+          }
+        }
+      }.get(); // this extracts the received message
+
+      Assert.assertTrue(getMsg);
+
+      RemoveRoutedRpc removeMsg = new RemoveRoutedRpc(routeIds, route);
+      rpcRegistry.tell(removeMsg, getRef());
+      expectMsgEquals(duration("2 second"), "Success");
+
+      rpcRegistry.tell(getRpc, getRef());
+
+      Boolean getNullMsg = new ExpectMsg<Boolean>("GetRoutedRpcReply") {
+        protected Boolean match(Object in) {
+          if (in instanceof GetRoutedRpcReply) {
+            GetRoutedRpcReply reply = (GetRoutedRpcReply)in;
+            return reply.getRoutePath() == null;
+          } else {
+            throw noMatch();
+          }
+        }
+      }.get();
+      Assert.assertTrue(getNullMsg);
+    }};
+
+  }
+
+}
index d011d331a684e4f0bcbbbf395e18145364dfdf4a..da3942a828182ff3e253e7c6ffcca2f99fdeb8b6 100644 (file)
-/*
- * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-
 package org.opendaylight.controller.remote.rpc.registry;
 
+import akka.actor.ActorPath;
 import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
+import akka.actor.ChildActorPath;
+import akka.actor.Props;
+import akka.japi.Pair;
 import akka.testkit.JavaTestKit;
-import junit.framework.Assert;
+import com.typesafe.config.ConfigFactory;
+import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mockito.Mockito;
 import org.opendaylight.controller.remote.rpc.RouteIdentifierImpl;
-import org.opendaylight.controller.remote.rpc.messages.AddRoutedRpc;
-import org.opendaylight.controller.remote.rpc.messages.AddRpc;
-import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpc;
-import org.opendaylight.controller.remote.rpc.messages.GetRoutedRpcReply;
-import org.opendaylight.controller.remote.rpc.messages.GetRpc;
-import org.opendaylight.controller.remote.rpc.messages.GetRpcReply;
-import org.opendaylight.controller.remote.rpc.messages.RemoveRoutedRpc;
-import org.opendaylight.controller.remote.rpc.messages.RemoveRpc;
 import org.opendaylight.controller.sal.connector.api.RpcRouter;
 import org.opendaylight.yangtools.yang.common.QName;
+import scala.concurrent.Await;
+import scala.concurrent.Future;
+import scala.concurrent.duration.FiniteDuration;
 
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.util.HashSet;
-import java.util.Set;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.AddOrUpdateRoutes;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.RemoveRoutes;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRouters;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.FindRoutersReply;
+import static org.opendaylight.controller.remote.rpc.registry.RpcRegistry.Messages.SetLocalRouter;
 
 public class RpcRegistryTest {
 
-  static ActorSystem system;
-
-
-  @BeforeClass
-  public static void setup() {
-    system = ActorSystem.create();
-  }
-
-  @AfterClass
-  public static void teardown() {
-    JavaTestKit.shutdownActorSystem(system);
-    system = null;
-  }
-
-  /**
-   This test add, read and remove an entry in global rpc
-   */
-  @Test
-  public void testGlobalRpc() throws URISyntaxException {
-    new JavaTestKit(system) {{
-      ActorRef rpcRegistry = system.actorOf(RpcRegistry.props(Mockito.mock(ClusterWrapper.class)));
-      QName type = new QName(new URI("actor1"), "actor1");
-      RouteIdentifierImpl routeId = new RouteIdentifierImpl(null, type, null);
-      final String route = "actor1";
-
-      AddRpc rpcMsg = new AddRpc(routeId, route);
-      rpcRegistry.tell(rpcMsg, getRef());
-      expectMsgEquals(duration("2 second"), "Success");
-
-      GetRpc getRpc = new GetRpc(routeId);
-      rpcRegistry.tell(getRpc, getRef());
-
-      Boolean getMsg = new ExpectMsg<Boolean>("GetRpcReply") {
-        protected Boolean match(Object in) {
-          if (in instanceof GetRpcReply) {
-            GetRpcReply reply = (GetRpcReply)in;
-            return route.equals(reply.getRoutePath());
-          } else {
-            throw noMatch();
-          }
-        }
-      }.get(); // this extracts the received message
+    private static ActorSystem node1;
+    private static ActorSystem node2;
+    private static ActorSystem node3;
 
-      Assert.assertTrue(getMsg);
+    private ActorRef registry1;
+    private ActorRef registry2;
+    private ActorRef registry3;
 
-      RemoveRpc removeMsg = new RemoveRpc(routeId);
-      rpcRegistry.tell(removeMsg, getRef());
-      expectMsgEquals(duration("2 second"), "Success");
+    @BeforeClass
+    public static void setup() throws InterruptedException {
+        Thread.sleep(1000); //give some time for previous test to close netty ports
+        node1 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberA"));
+        node2 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberB"));
+        node3 = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("memberC"));
+    }
 
-      rpcRegistry.tell(getRpc, getRef());
+    @AfterClass
+    public static void teardown(){
+        JavaTestKit.shutdownActorSystem(node1);
+        JavaTestKit.shutdownActorSystem(node2);
+        JavaTestKit.shutdownActorSystem(node3);
+        if (node1 != null)
+            node1.shutdown();
+        if (node2 != null)
+            node2.shutdown();
+        if (node3 != null)
+            node3.shutdown();
 
-      Boolean getNullMsg = new ExpectMsg<Boolean>("GetRpcReply") {
-        protected Boolean match(Object in) {
-          if (in instanceof GetRpcReply) {
-            GetRpcReply reply = (GetRpcReply)in;
-            return reply.getRoutePath() == null;
-          } else {
-            throw noMatch();
-          }
-        }
-      }.get();
-      Assert.assertTrue(getNullMsg);
-    }};
-
-  }
-
-  /**
-   This test add, read and remove an entry in routed rpc
-   */
-  @Test
-  public void testRoutedRpc() throws URISyntaxException {
-    new JavaTestKit(system) {{
-      ActorRef rpcRegistry = system.actorOf(RpcRegistry.props(Mockito.mock(ClusterWrapper.class)));
-      QName type = new QName(new URI("actor1"), "actor1");
-      RouteIdentifierImpl routeId = new RouteIdentifierImpl(null, type, null);
-      final String route = "actor1";
-
-      Set<RpcRouter.RouteIdentifier<?, ?, ?>> routeIds = new HashSet<>();
-      routeIds.add(routeId);
-
-      AddRoutedRpc rpcMsg = new AddRoutedRpc(routeIds, route);
-      rpcRegistry.tell(rpcMsg, getRef());
-      expectMsgEquals(duration("2 second"), "Success");
-
-      GetRoutedRpc getRpc = new GetRoutedRpc(routeId);
-      rpcRegistry.tell(getRpc, getRef());
-
-      Boolean getMsg = new ExpectMsg<Boolean>("GetRoutedRpcReply") {
-        protected Boolean match(Object in) {
-          if (in instanceof GetRoutedRpcReply) {
-            GetRoutedRpcReply reply = (GetRoutedRpcReply)in;
-            return route.equals(reply.getRoutePath());
-          } else {
-            throw noMatch();
-          }
+    }
+
+    @Before
+    public void createRpcRegistry() throws InterruptedException {
+        registry1 = node1.actorOf(Props.create(RpcRegistry.class));
+        registry2 = node2.actorOf(Props.create(RpcRegistry.class));
+        registry3 = node3.actorOf(Props.create(RpcRegistry.class));
+    }
+
+    @After
+    public void stopRpcRegistry() throws InterruptedException {
+        if (registry1 != null)
+            node1.stop(registry1);
+        if (registry2 != null)
+            node2.stop(registry2);
+        if (registry3 != null)
+            node3.stop(registry3);
+    }
+
+    /**
+     * One node cluster.
+     * 1. Register an rpc and ensure its router can be found.
+     * 2. Then remove the rpc and ensure it is deleted.
+     *
+     * @throws URISyntaxException
+     * @throws InterruptedException
+     */
+    @Test
+    public void testAddRemoveRpcOnSameNode() throws URISyntaxException, InterruptedException {
+
+        final JavaTestKit mockBroker = new JavaTestKit(node1);
+
+        //Add rpc on node 1
+        registry1.tell(new SetLocalRouter(mockBroker.getRef()), mockBroker.getRef());
+        registry1.tell(getAddRouteMessage(), mockBroker.getRef());
+
+        Thread.sleep(1000);// give some time for the registry to process the add
+
+        //find the route on node 1's registry
+        registry1.tell(new FindRouters(createRouteId()), mockBroker.getRef());
+        FindRoutersReply message = mockBroker.expectMsgClass(JavaTestKit.duration("10 second"), FindRoutersReply.class);
+        List<Pair<ActorRef, Long>> pairs = message.getRouterWithUpdateTime();
+
+        validateRouterReceived(pairs, mockBroker.getRef());
+
+        //Now remove rpc
+        registry1.tell(getRemoveRouteMessage(), mockBroker.getRef());
+        Thread.sleep(1000);
+        //find the route on node 1's registry
+        registry1.tell(new FindRouters(createRouteId()), mockBroker.getRef());
+        message = mockBroker.expectMsgClass(JavaTestKit.duration("10 second"), FindRoutersReply.class);
+        pairs = message.getRouterWithUpdateTime();
+
+        Assert.assertTrue(pairs.isEmpty());
+    }
+
+    /**
+     * Three node cluster.
+     * 1. Register an rpc on one node and ensure its router can be found on the other two.
+     * 2. Remove the rpc on that node and ensure it is removed on the other two.
+     *
+     * @throws URISyntaxException
+     * @throws InterruptedException
+     */
+    @Test
+    public void testRpcAddRemoveInCluster() throws URISyntaxException, InterruptedException {
+
+        validateSystemStartup();
+
+        final JavaTestKit mockBroker1 = new JavaTestKit(node1);
+        final JavaTestKit mockBroker2 = new JavaTestKit(node2);
+        final JavaTestKit mockBroker3 = new JavaTestKit(node3);
+
+        //Add rpc on node 1
+        registry1.tell(new SetLocalRouter(mockBroker1.getRef()), mockBroker1.getRef());
+        registry1.tell(getAddRouteMessage(), mockBroker1.getRef());
+
+        Thread.sleep(1000);// give some time for bucket store data sync
+
+        //find the route in node 2's registry
+        List<Pair<ActorRef, Long>> pairs = findRouters(registry2, mockBroker2);
+        validateRouterReceived(pairs, mockBroker1.getRef());
+
+        //find the route in node 3's registry
+        pairs = findRouters(registry3, mockBroker3);
+        validateRouterReceived(pairs, mockBroker1.getRef());
+
+        //Now remove
+        registry1.tell(getRemoveRouteMessage(), mockBroker1.getRef());
+        Thread.sleep(1000);// give some time for bucket store data sync
+
+        pairs = findRouters(registry2, mockBroker2);
+        Assert.assertTrue(pairs.isEmpty());
+
+        pairs = findRouters(registry3, mockBroker3);
+        Assert.assertTrue(pairs.isEmpty());
+    }
+
+    /**
+     * Three node cluster.
+     * Register the same rpc on two nodes and ensure two routers are found on the third.
+     *
+     * @throws Exception
+     */
+    @Test
+    public void testAnRpcAddedOnMultiNodesShouldReturnMultiRouter() throws Exception {
+
+        validateSystemStartup();
+
+        final JavaTestKit mockBroker1 = new JavaTestKit(node1);
+        final JavaTestKit mockBroker2 = new JavaTestKit(node2);
+        final JavaTestKit mockBroker3 = new JavaTestKit(node3);
+
+        //Thread.sleep(5000);//let system come up
+
+        //Add rpc on node 1
+        registry1.tell(new SetLocalRouter(mockBroker1.getRef()), mockBroker1.getRef());
+        registry1.tell(getAddRouteMessage(), mockBroker1.getRef());
+
+        //Add same rpc on node 2
+        registry2.tell(new SetLocalRouter(mockBroker2.getRef()), mockBroker2.getRef());
+        registry2.tell(getAddRouteMessage(), mockBroker2.getRef());
+
+        registry3.tell(new SetLocalRouter(mockBroker3.getRef()), mockBroker3.getRef());
+        Thread.sleep(1000);// give some time for bucket store data sync
+
+        //find the route in node 3's registry
+        registry3.tell(new FindRouters(createRouteId()), mockBroker3.getRef());
+        FindRoutersReply message = mockBroker3.expectMsgClass(JavaTestKit.duration("10 second"), FindRoutersReply.class);
+        List<Pair<ActorRef, Long>> pairs = message.getRouterWithUpdateTime();
+
+        validateMultiRouterReceived(pairs, mockBroker1.getRef(), mockBroker2.getRef());
+
+    }
+
+    private List<Pair<ActorRef, Long>> findRouters(ActorRef registry, JavaTestKit receivingActor) throws URISyntaxException {
+        registry.tell(new FindRouters(createRouteId()), receivingActor.getRef());
+        FindRoutersReply message = receivingActor.expectMsgClass(JavaTestKit.duration("10 second"), FindRoutersReply.class);
+        return message.getRouterWithUpdateTime();
+    }
+
+    private void validateMultiRouterReceived(List<Pair<ActorRef, Long>> actual, ActorRef... expected) {
+        Assert.assertTrue(actual != null);
+        Assert.assertTrue(actual.size() == expected.length);
+    }
+
+    private void validateRouterReceived(List<Pair<ActorRef, Long>> actual, ActorRef expected){
+        Assert.assertTrue(actual != null);
+        Assert.assertTrue(actual.size() == 1);
+
+        for (Pair<ActorRef, Long> pair : actual){
+            Assert.assertTrue(expected.path().uid() == pair.first().path().uid());
         }
-      }.get(); // this extracts the received message
+    }
+
+    private void validateSystemStartup() throws InterruptedException {
+
+        Thread.sleep(5000);
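+        // each RpcRegistry is expected to create a child "store", which in turn creates a child "gossiper"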
+        ActorPath gossiper1Path = new ChildActorPath(new ChildActorPath(registry1.path(), "store"), "gossiper");
+        ActorPath gossiper2Path = new ChildActorPath(new ChildActorPath(registry2.path(), "store"), "gossiper");
+        ActorPath gossiper3Path = new ChildActorPath(new ChildActorPath(registry3.path(), "store"), "gossiper");
+
+        ActorSelection gossiper1 = node1.actorSelection(gossiper1Path);
+        ActorSelection gossiper2 = node2.actorSelection(gossiper2Path);
+        ActorSelection gossiper3 = node3.actorSelection(gossiper3Path);
+
 
-      Assert.assertTrue(getMsg);
+        if (!resolveReference(gossiper1, gossiper2, gossiper3))
+            Assert.fail("Could not find gossipers");
+    }
 
-      RemoveRoutedRpc removeMsg = new RemoveRoutedRpc(routeIds, route);
-      rpcRegistry.tell(removeMsg, getRef());
-      expectMsgEquals(duration("2 second"), "Success");
+    private Boolean resolveReference(ActorSelection... gossipers) throws InterruptedException {
 
-      rpcRegistry.tell(getRpc, getRef());
+        Boolean resolved = true;
 
-      Boolean getNullMsg = new ExpectMsg<Boolean>("GetRoutedRpcReply") {
-        protected Boolean match(Object in) {
-          if (in instanceof GetRoutedRpcReply) {
-            GetRoutedRpcReply reply = (GetRoutedRpcReply)in;
-            return reply.getRoutePath() == null;
-          } else {
-            throw noMatch();
-          }
+        for (int i=0; i< 5; i++) {
+            Thread.sleep(1000);
+            for (ActorSelection gossiper : gossipers) {
+                Future<ActorRef> future = gossiper.resolveOne(new FiniteDuration(5000, TimeUnit.MILLISECONDS));
+
+                ActorRef ref = null;
+                try {
+                    ref = Await.result(future, new FiniteDuration(10000, TimeUnit.MILLISECONDS));
+                } catch (Exception e) {
+                    e.printStackTrace();
+                }
+
+                if (ref == null)
+                    resolved = false;
+            }
+
+            if (resolved) break;
         }
-      }.get();
-      Assert.assertTrue(getNullMsg);
-    }};
+        return resolved;
+    }
+
+    private AddOrUpdateRoutes getAddRouteMessage() throws URISyntaxException {
+        return new AddOrUpdateRoutes(createRouteIds());
+    }
+
+    private RemoveRoutes getRemoveRouteMessage() throws URISyntaxException {
+        return new RemoveRoutes(createRouteIds());
+    }
 
-  }
+    private List<RpcRouter.RouteIdentifier<?,?,?>> createRouteIds() throws URISyntaxException {
+        QName type = new QName(new URI("/mockrpc"), "mockrpc");
+        List<RpcRouter.RouteIdentifier<?,?,?>> routeIds = new ArrayList<>();
+        routeIds.add(new RouteIdentifierImpl(null, type, null));
+        return routeIds;
+    }
 
-}
+    private RpcRouter.RouteIdentifier<?,?,?> createRouteId() throws URISyntaxException {
+        QName type = new QName(new URI("/mockrpc"), "mockrpc");
+        return new RouteIdentifierImpl(null, type, null);
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStoreTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/gossip/BucketStoreTest.java
new file mode 100644 (file)
index 0000000..7e87da0
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSystem;
+import akka.actor.Props;
+import akka.testkit.TestActorRef;
+import akka.testkit.TestProbe;
+import com.typesafe.config.ConfigFactory;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.opendaylight.controller.remote.rpc.TerminationMonitor;
+
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.spy;
+
+public class BucketStoreTest {
+
+    private static ActorSystem system;
+    private static BucketStore store;
+
+    private BucketStore mockStore;
+
+    @BeforeClass
+    public static void setup() {
+
+        system = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("unit-test"));
+        system.actorOf(Props.create(TerminationMonitor.class), "termination-monitor");
+
+        store = createStore();
+    }
+
+    @AfterClass
+    public static void teardown() {
+        system.shutdown();
+    }
+
+    @Before
+    public void createMocks(){
+        mockStore = spy(store);
+    }
+
+    @After
+    public void resetMocks(){
+        reset(mockStore);
+    }
+
+    @Test
+    public void testReceiveUpdateBucket_WhenInputBucketShouldUpdateVersion(){
+        Bucket bucket = new BucketImpl();
+        Long expectedVersion = bucket.getVersion();
+
+        mockStore.receiveUpdateBucket(bucket);
+
+        Assert.assertEquals(bucket, mockStore.getLocalBucket());
+        Assert.assertEquals(expectedVersion, mockStore.getLocalBucket().getVersion());
+    }
+
+    /**
+     * Creates a BucketStore actor and returns the underlying instance of the BucketStore class.
+     *
+     * @return instance of BucketStore class
+     */
+    private static BucketStore createStore(){
+        TestProbe mockActor = new TestProbe(system);
+        ActorRef mockGossiper = mockActor.ref();
+        final Props props = Props.create(BucketStore.class, mockGossiper);
+        final TestActorRef<BucketStore> testRef = TestActorRef.create(system, props, "testStore");
+
+        return testRef.underlyingActor();
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/gossip/GossiperTest.java b/opendaylight/md-sal/sal-remoterpc-connector/src/test/java/org/opendaylight/controller/remote/rpc/registry/gossip/GossiperTest.java
new file mode 100644 (file)
index 0000000..f076c13
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.remote.rpc.registry.gossip;
+
+import akka.actor.ActorSystem;
+import akka.actor.Address;
+import akka.actor.Props;
+import akka.testkit.TestActorRef;
+import com.typesafe.config.ConfigFactory;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.opendaylight.controller.remote.rpc.TerminationMonitor;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyMap;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipEnvelope;
+import static org.opendaylight.controller.remote.rpc.registry.gossip.Messages.GossiperMessages.GossipStatus;
+
+
+public class GossiperTest {
+
+    private static ActorSystem system;
+    private static Gossiper gossiper;
+
+    private Gossiper mockGossiper;
+
+    @BeforeClass
+    public static void setup() throws InterruptedException {
+        Thread.sleep(1000);//give some time for previous test to stop the system. Netty port conflict arises otherwise.
+        system = ActorSystem.create("opendaylight-rpc", ConfigFactory.load().getConfig("unit-test"));
+        system.actorOf(Props.create(TerminationMonitor.class), "termination-monitor");
+
+        gossiper = createGossiper();
+    }
+
+    @AfterClass
+    public static void teardown() {
+        if (system != null)
+            system.shutdown();
+    }
+
+    @Before
+    public void createMocks(){
+        mockGossiper = spy(gossiper);
+    }
+
+    @After
+    public void resetMocks(){
+        reset(mockGossiper);
+
+    }
+
+    @Test
+    public void testReceiveGossipTick_WhenNoRemoteMemberShouldIgnore(){
+
+        mockGossiper.setClusterMembers(Collections.EMPTY_LIST);
+        doNothing().when(mockGossiper).getLocalStatusAndSendTo(any(Address.class));
+        mockGossiper.receiveGossipTick();
+        verify(mockGossiper, times(0)).getLocalStatusAndSendTo(any(Address.class));
+    }
+
+    @Test
+    public void testReceiveGossipTick_WhenRemoteMemberExistsShouldSendStatus(){
+        List<Address> members = new ArrayList<>();
+        Address remote = new Address("tcp", "member");
+        members.add(remote);
+
+        mockGossiper.setClusterMembers(members);
+        doNothing().when(mockGossiper).getLocalStatusAndSendTo(any(Address.class));
+        mockGossiper.receiveGossipTick();
+        verify(mockGossiper, times(1)).getLocalStatusAndSendTo(any(Address.class));
+    }
+
+    @Test
+    public void testReceiveGossipStatus_WhenSenderIsNonMemberShouldIgnore(){
+
+        Address nonMember = new Address("tcp", "non-member");
+        GossipStatus remoteStatus = new GossipStatus(nonMember, mock(Map.class));
+
+        //add a member
+        List<Address> members = new ArrayList<>();
+        members.add(new Address("tcp", "member"));
+
+        mockGossiper.setClusterMembers(members);
+        mockGossiper.receiveGossipStatus(remoteStatus);
+        verify(mockGossiper, times(0)).getSender();
+    }
+
+    @Test
+    public void testReceiveGossip_WhenNotAddressedToSelfShouldIgnore(){
+        Address notSelf = new Address("tcp", "not-self");
+
+        GossipEnvelope envelope = new GossipEnvelope(notSelf, notSelf, mock(Map.class));
+        doNothing().when(mockGossiper).updateRemoteBuckets(anyMap());
+        mockGossiper.receiveGossip(envelope);
+        verify(mockGossiper, times(0)).updateRemoteBuckets(anyMap());
+    }
+
+    /**
+     * Creates a Gossiper actor and returns the underlying instance of the Gossiper class.
+     *
+     * @return instance of Gossiper class
+     */
+    private static Gossiper createGossiper(){
+
+        final Props props = Props.create(Gossiper.class, false);
+        final TestActorRef<Gossiper> testRef = TestActorRef.create(system, props, "testGossiper");
+
+        return testRef.underlyingActor();
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-remoterpc-connector/src/test/resources/application.conf b/opendaylight/md-sal/sal-remoterpc-connector/src/test/resources/application.conf
new file mode 100644 (file)
index 0000000..874d3fc
--- /dev/null
@@ -0,0 +1,116 @@
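+# Each top-level block below is a named configuration; tests select one with
+# ConfigFactory.load().getConfig("<name>") (e.g. "memberA" in RpcRegistryTest).
+# memberA/B/C differ only in their netty.tcp port so three ActorSystems can
+# share one host; all of them join the cluster via the seed node on port 2551.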
+odl-cluster{
+  akka {
+    loglevel = "INFO"
+    #log-config-on-start = on
+
+    actor {
+      provider = "akka.cluster.ClusterActorRefProvider"
+      debug{
+        #autoreceive = on
+        #lifecycle = on
+
+      }
+    }
+    remote {
+      log-received-messages = on
+      log-sent-messages = on
+
+      log-remote-lifecycle-events = off
+      netty.tcp {
+        hostname = "localhost"
+        port = 2551
+      }
+    }
+
+    cluster {
+      seed-nodes = ["akka.tcp://opendaylight-rpc@localhost:2551"]
+
+      auto-down-unreachable-after = 10s
+    }
+  }
+}
+unit-test{
+  akka {
+    loglevel = "INFO"
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
+    actor {
+      provider = "akka.cluster.ClusterActorRefProvider"
+    }
+  }
+}
+
+memberA{
+  akka {
+    loglevel = "INFO"
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
+    actor {
+      provider = "akka.cluster.ClusterActorRefProvider"
+    }
+    remote {
+      log-received-messages = off
+      log-sent-messages = off
+
+      log-remote-lifecycle-events = off
+      netty.tcp {
+        hostname = "localhost"
+        port = 2551
+      }
+    }
+
+    cluster {
+      seed-nodes = ["akka.tcp://opendaylight-rpc@localhost:2551"]
+
+      auto-down-unreachable-after = 10s
+    }
+  }
+}
+memberB{
+  akka {
+    loglevel = "INFO"
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
+    actor {
+      provider = "akka.cluster.ClusterActorRefProvider"
+    }
+    remote {
+      log-received-messages = off
+      log-sent-messages = off
+
+      log-remote-lifecycle-events = off
+      netty.tcp {
+        hostname = "localhost"
+        port = 2552
+      }
+    }
+
+    cluster {
+      seed-nodes = ["akka.tcp://opendaylight-rpc@localhost:2551"]
+
+      auto-down-unreachable-after = 10s
+    }
+  }
+}
+memberC{
+  akka {
+    loglevel = "INFO"
+    loggers = ["akka.event.slf4j.Slf4jLogger"]
+    actor {
+      provider = "akka.cluster.ClusterActorRefProvider"
+    }
+    remote {
+      log-received-messages = off
+      log-sent-messages = off
+
+      log-remote-lifecycle-events = off
+      netty.tcp {
+        hostname = "localhost"
+        port = 2553
+      }
+    }
+
+    cluster {
+      seed-nodes = ["akka.tcp://opendaylight-rpc@localhost:2551"]
+
+      auto-down-unreachable-after = 10s
+    }
+  }
+}
\ No newline at end of file
index 6d050cf4252b141a60fb04b550cda7d923c092b3..fa91f0398d75cac155269ab83dd6f804e940acec 100644 (file)
   <artifactId>sal-rest-connector-config</artifactId>
   <description>Configuration files for sal-rest-connector</description>
   <packaging>jar</packaging>
+    <build>
+    <plugins>
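+        <!-- build-helper-maven-plugin attaches the generated 10-rest-connector.xml as a
+             secondary artifact (type "xml", classifier "config") so it can be pulled in
+             as a Maven artifact, e.g. from a Karaf features file. -->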
+        <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>attach-artifacts</id>
+            <goals>
+              <goal>attach-artifact</goal>
+            </goals>
+            <phase>package</phase>
+            <configuration>
+              <artifacts>
+                <artifact>
+                  <file>${project.build.directory}/classes/initial/10-rest-connector.xml</file>
+                  <type>xml</type>
+                  <classifier>config</classifier>
+                </artifact>
+              </artifacts>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
 </project>
index 2fdc8c7d1ef2e0873db819c2cb1b68cf88fb41d9..3be423c35628d06378ead78b344059a15dcc3ed6 100644 (file)
@@ -14,7 +14,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
         <module>
           <type xmlns:rest="urn:opendaylight:params:xml:ns:yang:controller:md:sal:rest:connector">rest:rest-connector-impl</type>
           <name>rest-connector-default-impl</name>
-          <websocket-port>8181</websocket-port>
+          <websocket-port>8185</websocket-port>
           <dom-broker>
             <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
             <name>dom-broker</name>
@@ -35,4 +35,7 @@ and is available at http://www.eclipse.org/legal/epl-v10.html
       </services>
     </data>
   </configuration>
+  <required-capabilities>
+      <capability>urn:opendaylight:params:xml:ns:yang:controller:md:sal:rest:connector?module=opendaylight-rest-connector&amp;revision=2014-07-24</capability>
+  </required-capabilities>
 </snapshot>
index e8701f37e5846e72e9c177ca2188969c7fb9a9da..8dbc5b50ee5dbd8ac374ef7d1452384442923085 100644 (file)
@@ -7,20 +7,12 @@
  */
 package org.opendaylight.controller.sal.restconf.impl;
 
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
-import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
-
 import com.google.common.base.Optional;
 import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.ListenableFuture;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import javax.ws.rs.core.Response.Status;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker.DataChangeScope;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFailedException;
 import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
 import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationOperation;
@@ -45,6 +37,16 @@ import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.ws.rs.core.Response.Status;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+
+import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.CONFIGURATION;
+import static org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType.OPERATIONAL;
+
 public class BrokerFacade {
     private final static Logger LOG = LoggerFactory.getLogger(BrokerFacade.class);
 
@@ -255,15 +257,20 @@ public class BrokerFacade {
             currentArguments.add(currentArg);
             YangInstanceIdentifier currentPath = YangInstanceIdentifier.create(currentArguments);
 
-            final Optional<NormalizedNode<?, ?>> datastoreData;
+            final Boolean exists;
+
             try {
-                datastoreData = rwTx.read(store, currentPath).get();
-            } catch (InterruptedException | ExecutionException e) {
+
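+                // only check whether the node exists; the data itself is not needed here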
+                CheckedFuture<Boolean, ReadFailedException> future =
+                    rwTx.exists(store, currentPath);
+                exists = future.checkedGet();
+            } catch (ReadFailedException e) {
                 LOG.error("Failed to read pre-existing data from store {} path {}", store, currentPath, e);
                 throw new IllegalStateException("Failed to read pre-existing data", e);
             }
 
-            if (!datastoreData.isPresent() && iterator.hasNext()) {
+
+            if (!exists && iterator.hasNext()) {
                 rwTx.merge(store, currentPath, currentOp.createDefault(currentArg));
             }
         }
index 73ca02c505f47741ca4e38c071a780a0f32e1cdf..fac6c80564784759e08aef3db46ebada1dba69fe 100644 (file)
@@ -632,7 +632,8 @@ public class RestconfImpl implements RestconfService {
         NormalizedNode<?, ?> data = null;
         YangInstanceIdentifier normalizedII;
         if (mountPoint != null) {
-            normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData.getInstanceIdentifier());
+            normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData
+                    .getInstanceIdentifier());
             data = broker.readConfigurationData(mountPoint, normalizedII);
         } else {
             normalizedII = controllerContext.toNormalized(iiWithData.getInstanceIdentifier());
@@ -695,7 +696,8 @@ public class RestconfImpl implements RestconfService {
         NormalizedNode<?, ?> data = null;
         YangInstanceIdentifier normalizedII;
         if (mountPoint != null) {
-            normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData.getInstanceIdentifier());
+            normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData
+                    .getInstanceIdentifier());
             data = broker.readOperationalData(mountPoint, normalizedII);
         } else {
             normalizedII = controllerContext.toNormalized(iiWithData.getInstanceIdentifier());
@@ -730,7 +732,8 @@ public class RestconfImpl implements RestconfService {
 
         try {
             if (mountPoint != null) {
-                normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData.getInstanceIdentifier());
+                normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData
+                        .getInstanceIdentifier());
                 broker.commitConfigurationDataPut(mountPoint, normalizedII, datastoreNormalizedNode).get();
             } else {
                 normalizedII = controllerContext.toNormalized(iiWithData.getInstanceIdentifier());
@@ -842,7 +845,8 @@ public class RestconfImpl implements RestconfService {
 
         try {
             if (mountPoint != null) {
-                normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData.getInstanceIdentifier());
+                normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData
+                        .getInstanceIdentifier());
                 broker.commitConfigurationDataPost(mountPoint, normalizedII, datastoreNormalizedData);
             } else {
                 normalizedII = controllerContext.toNormalized(iiWithData.getInstanceIdentifier());
@@ -886,7 +890,8 @@ public class RestconfImpl implements RestconfService {
 
         try {
             if (mountPoint != null) {
-                normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData.getInstanceIdentifier());
+                normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData
+                        .getInstanceIdentifier());
                 broker.commitConfigurationDataPost(mountPoint, normalizedII, datastoreNormalizedData);
 
             } else {
@@ -908,7 +913,8 @@ public class RestconfImpl implements RestconfService {
 
         try {
             if (mountPoint != null) {
-                normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData.getInstanceIdentifier());
+                normalizedII = new DataNormalizer(mountPoint.getSchemaContext()).toNormalized(iiWithData
+                        .getInstanceIdentifier());
                 broker.commitConfigurationDataDelete(mountPoint, normalizedII);
             } else {
                 normalizedII = controllerContext.toNormalized(iiWithData.getInstanceIdentifier());
@@ -1082,7 +1088,12 @@ public class RestconfImpl implements RestconfService {
             iiBuilder = YangInstanceIdentifier.builder(iiOriginal);
         }
 
-        iiBuilder.node(schemaOfData.getQName());
+        if ((schemaOfData instanceof ListSchemaNode)) {
+            HashMap<QName, Object> keys = this.resolveKeysFromData(((ListSchemaNode) schemaOfData), data);
+            iiBuilder.nodeWithKey(schemaOfData.getQName(), keys);
+        } else {
+            iiBuilder.node(schemaOfData.getQName());
+        }
 
         YangInstanceIdentifier instance = iiBuilder.toInstance();
         DOMMountPoint mountPoint = null;
@@ -1093,6 +1104,34 @@ public class RestconfImpl implements RestconfService {
         return new InstanceIdWithSchemaNode(instance, schemaOfData, mountPoint);
     }
 
+    private HashMap<QName, Object> resolveKeysFromData(final ListSchemaNode listNode, final CompositeNode dataNode) {
+        final HashMap<QName, Object> keyValues = new HashMap<QName, Object>();
+        List<QName> _keyDefinition = listNode.getKeyDefinition();
+        for (final QName key : _keyDefinition) {
+            SimpleNode<? extends Object> head = null;
+            String localName = key.getLocalName();
+            List<SimpleNode<? extends Object>> simpleNodesByName = dataNode.getSimpleNodesByName(localName);
+            if (simpleNodesByName != null) {
+                head = Iterables.getFirst(simpleNodesByName, null);
+            }
+
+            Object dataNodeKeyValueObject = null;
+            if (head != null) {
+                dataNodeKeyValueObject = head.getValue();
+            }
+
+            if (dataNodeKeyValueObject == null) {
+                throw new RestconfDocumentedException("Data contains list \"" + dataNode.getNodeType().getLocalName()
+                        + "\" which does not contain key: \"" + key.getLocalName() + "\"", ErrorType.PROTOCOL,
+                        ErrorTag.INVALID_VALUE);
+            }
+
+            keyValues.put(key, dataNodeKeyValueObject);
+        }
+
+        return keyValues;
+    }
+
     private boolean endsWithMountPoint(final String identifier) {
         return identifier.endsWith(ControllerContext.MOUNT) || identifier.endsWith(ControllerContext.MOUNT + "/");
     }
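The new resolveKeysFromData helper reads each key leaf out of the posted list entry so the identifier builder can emit a keyed path argument via nodeWithKey instead of a plain node step. A rough illustration of the resulting identifier, assuming a hypothetical list "interface" keyed by "name" under a container "interfaces":

    // Hypothetical namespace, revision and key value, for illustration only.
    HashMap<QName, Object> keys = new HashMap<QName, Object>();
    keys.put(QName.create("urn:example:ifmgr", "2014-07-01", "name"), "eth0");
    YangInstanceIdentifier instance = YangInstanceIdentifier.builder()
            .node(QName.create("urn:example:ifmgr", "2014-07-01", "interfaces"))
            .nodeWithKey(QName.create("urn:example:ifmgr", "2014-07-01", "interface"), keys)
            .toInstance();
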
@@ -1431,7 +1470,8 @@ public class RestconfImpl implements RestconfService {
                 "It wasn't possible to correctly interpret data."));
     }
 
-    private NormalizedNode<?, ?> compositeNodeToDatastoreNormalizedNode(final CompositeNode compNode, final DataSchemaNode schema) {
+    private NormalizedNode<?, ?> compositeNodeToDatastoreNormalizedNode(final CompositeNode compNode,
+            final DataSchemaNode schema) {
         List<Node<?>> lst = new ArrayList<Node<?>>();
         lst.add(compNode);
         if (schema instanceof ContainerSchemaNode) {
@@ -1448,7 +1488,8 @@ public class RestconfImpl implements RestconfService {
                 "It wasn't possible to translate specified data to datastore readable form."));
     }
 
-    private InstanceIdWithSchemaNode normalizeInstanceIdentifierWithSchemaNode(final InstanceIdWithSchemaNode iiWithSchemaNode) {
+    private InstanceIdWithSchemaNode normalizeInstanceIdentifierWithSchemaNode(
+            final InstanceIdWithSchemaNode iiWithSchemaNode) {
         return normalizeInstanceIdentifierWithSchemaNode(iiWithSchemaNode, false);
     }
 
@@ -1459,8 +1500,8 @@ public class RestconfImpl implements RestconfService {
                 iiWithSchemaNode.getMountPoint());
     }
 
-    private YangInstanceIdentifier instanceIdentifierToReadableFormForNormalizeNode(final YangInstanceIdentifier instIdentifier,
-            final boolean unwrapLastListNode) {
+    private YangInstanceIdentifier instanceIdentifierToReadableFormForNormalizeNode(
+            final YangInstanceIdentifier instIdentifier, final boolean unwrapLastListNode) {
         Preconditions.checkNotNull(instIdentifier, "Instance identifier can't be null");
         final List<PathArgument> result = new ArrayList<PathArgument>();
         final Iterator<PathArgument> iter = instIdentifier.getPathArguments().iterator();
index 146e88299ac6133e71fbf240cea3953956dbbd78..6b2583024036b6b7ef39863f64f3fa9069d19e94 100644 (file)
@@ -8,21 +8,9 @@
 
 package org.opendaylight.controller.sal.restconf.impl.test;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertSame;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.inOrder;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.when;
-
 import com.google.common.base.Optional;
 import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.Futures;
-import java.util.concurrent.Future;
 import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -55,6 +43,19 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdent
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.impl.schema.Builders;
 
+import java.util.concurrent.Future;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertSame;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
+import static org.mockito.Mockito.when;
+
 /**
  * Unit tests for BrokerFacade.
  *
@@ -111,6 +112,11 @@ public class BrokerFacadeTest {
         return  Futures.immediateCheckedFuture(Optional.<NormalizedNode<?, ?>> of(dummyNode));
     }
 
+    private CheckedFuture<Boolean,ReadFailedException> wrapExistence(Boolean exists) {
+        return  Futures.immediateCheckedFuture(exists);
+    }
+
+
     /**
      * Value of this node shouldn't be important for testing purposes
      */
@@ -189,8 +195,14 @@ public class BrokerFacadeTest {
         CheckedFuture<Void, TransactionCommitFailedException> expFuture = mock(CheckedFuture.class);
 
         NormalizedNode<?, ?> dummyNode2 = createDummyNode("dummy:namespace2", "2014-07-01", "dummy local name2");
+
         when(rwTransaction.read(eq(LogicalDatastoreType.CONFIGURATION), any(YangInstanceIdentifier.class))).thenReturn(
                 wrapDummyNode(dummyNode2));
+
+        when(rwTransaction.exists(eq(LogicalDatastoreType.CONFIGURATION), any(YangInstanceIdentifier.class))).thenReturn(
+            wrapExistence(true));
+
+
         when(rwTransaction.submit()).thenReturn(expFuture);
 
         CheckedFuture<Void, TransactionCommitFailedException> actualFuture = brokerFacade.commitConfigurationDataPost(
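commitConfigurationDataPost is now expected to check whether the target node already exists before writing, so the test stubs exists() on the read-write transaction as well. A minimal Mockito sketch of that stubbing, assuming the MD-SAL DOMDataReadWriteTransaction API and the static Mockito imports already used in this test:

    // Sketch: make exists() report that the node is already present in CONFIGURATION.
    DOMDataReadWriteTransaction rwTx = mock(DOMDataReadWriteTransaction.class);
    CheckedFuture<Boolean, ReadFailedException> present = Futures.immediateCheckedFuture(Boolean.TRUE);
    when(rwTx.exists(eq(LogicalDatastoreType.CONFIGURATION), any(YangInstanceIdentifier.class)))
            .thenReturn(present);
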
index 0c8b4d5a2ada924025aa40ff805d9c4613606e66..1141e1d72e212204ed21ffcf725e4226835e427b 100644 (file)
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.opendaylight.yangtools</groupId>
+      <artifactId>yang-data-api</artifactId>
+    </dependency>
   </dependencies>
 
   <build>
index 82409d2e4033050833c7285927e5db23d0d20fc6..633d419fa9ae4219e2d276eb530fe4780514a88d 100644 (file)
@@ -7,8 +7,8 @@
  */
 package org.opendaylight.controller.sal.rest.doc.impl;
 
+import com.google.common.base.Preconditions;
 import javax.ws.rs.core.UriInfo;
-
 import org.opendaylight.controller.sal.core.api.model.SchemaService;
 import org.opendaylight.controller.sal.rest.doc.swagger.ApiDeclaration;
 import org.opendaylight.controller.sal.rest.doc.swagger.ResourceList;
@@ -16,11 +16,8 @@ import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Preconditions;
-
 /**
- * This class gathers all yang defined {@link Module}s and generates Swagger
- * compliant documentation.
+ * This class gathers all yang defined {@link Module}s and generates Swagger compliant documentation.
  */
 public class ApiDocGenerator extends BaseYangSwaggerGenerator {
 
index 5ba8b26bc1eb6bb69ac0d5e3564dbbfe5f8c27cf..1b2718251446dde021d3cc62b794b990f8852dc4 100644 (file)
@@ -7,6 +7,12 @@
  */
 package org.opendaylight.controller.sal.rest.doc.impl;
 
+import static org.opendaylight.controller.sal.rest.doc.util.RestDocgenUtil.resolvePathArgumentsName;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+import com.fasterxml.jackson.datatype.jsonorg.JsonOrgModule;
+import com.google.common.base.Preconditions;
 import java.io.IOException;
 import java.net.URI;
 import java.text.DateFormat;
@@ -22,9 +28,7 @@ import java.util.List;
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
-
 import javax.ws.rs.core.UriInfo;
-
 import org.json.JSONException;
 import org.json.JSONObject;
 import org.opendaylight.controller.sal.rest.doc.model.builder.OperationBuilder;
@@ -46,11 +50,6 @@ import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.SerializationFeature;
-import com.fasterxml.jackson.datatype.jsonorg.JsonOrgModule;
-import com.google.common.base.Preconditions;
-
 public class BaseYangSwaggerGenerator {
 
     private static Logger _logger = LoggerFactory.getLogger(BaseYangSwaggerGenerator.class);
@@ -75,8 +74,7 @@ public class BaseYangSwaggerGenerator {
      * @param operType
      * @return list of modules converted to swagger compliant resource list.
      */
-    public ResourceList getResourceListing(UriInfo uriInfo, SchemaContext schemaContext,
-            String context) {
+    public ResourceList getResourceListing(UriInfo uriInfo, SchemaContext schemaContext, String context) {
 
         ResourceList resourceList = createResourceList();
 
@@ -88,11 +86,9 @@ public class BaseYangSwaggerGenerator {
 
         for (Module module : modules) {
             String revisionString = SIMPLE_DATE_FORMAT.format(module.getRevision());
-
             Resource resource = new Resource();
             _logger.debug("Working on [{},{}]...", module.getName(), revisionString);
-            ApiDeclaration doc = getApiDeclaration(module.getName(), revisionString, uriInfo,
-                    schemaContext, context);
+            ApiDeclaration doc = getApiDeclaration(module.getName(), revisionString, uriInfo, schemaContext, context);
 
             if (doc != null) {
                 resource.setPath(generatePath(uriInfo, module.getName(), revisionString));
@@ -119,8 +115,7 @@ public class BaseYangSwaggerGenerator {
         return uri.toASCIIString();
     }
 
-    public ApiDeclaration getApiDeclaration(String module, String revision, UriInfo uriInfo,
-            SchemaContext schemaContext, String context) {
+    public ApiDeclaration getApiDeclaration(String module, String revision, UriInfo uriInfo, SchemaContext schemaContext, String context) {
         Date rev = null;
         try {
             rev = SIMPLE_DATE_FORMAT.parse(revision);
@@ -128,17 +123,15 @@ public class BaseYangSwaggerGenerator {
             throw new IllegalArgumentException(e);
         }
         Module m = schemaContext.findModuleByName(module, rev);
-        Preconditions.checkArgument(m != null, "Could not find module by name,revision: " + module
-                + "," + revision);
+        Preconditions.checkArgument(m != null, "Could not find module by name,revision: " + module + "," + revision);
 
-        return getApiDeclaration(m, rev, uriInfo, schemaContext, context);
+        return getApiDeclaration(m, rev, uriInfo, context, schemaContext);
     }
 
-    public ApiDeclaration getApiDeclaration(Module module, Date revision, UriInfo uriInfo,
-            SchemaContext schemaContext, String context) {
+    public ApiDeclaration getApiDeclaration(Module module, Date revision, UriInfo uriInfo, String context, SchemaContext schemaContext) {
         String basePath = createBasePathFromUriInfo(uriInfo);
 
-        ApiDeclaration doc = getSwaggerDocSpec(module, basePath, context);
+        ApiDeclaration doc = getSwaggerDocSpec(module, basePath, context, schemaContext);
         if (doc != null) {
             return doc;
         }
@@ -152,12 +145,12 @@ public class BaseYangSwaggerGenerator {
             portPart = ":" + port;
         }
         String basePath = new StringBuilder(uriInfo.getBaseUri().getScheme()).append("://")
-                .append(uriInfo.getBaseUri().getHost()).append(portPart).append("/")
-                .append(RESTCONF_CONTEXT_ROOT).toString();
+                .append(uriInfo.getBaseUri().getHost()).append(portPart).append("/").append(RESTCONF_CONTEXT_ROOT)
+                .toString();
         return basePath;
     }
 
-    public ApiDeclaration getSwaggerDocSpec(Module m, String basePath, String context) {
+    public ApiDeclaration getSwaggerDocSpec(Module m, String basePath, String context, SchemaContext schemaContext) {
         ApiDeclaration doc = createApiDeclaration(basePath);
 
         List<Api> apis = new ArrayList<Api>();
@@ -167,22 +160,21 @@ public class BaseYangSwaggerGenerator {
         for (DataSchemaNode node : dataSchemaNodes) {
             if ((node instanceof ListSchemaNode) || (node instanceof ContainerSchemaNode)) {
 
-                _logger.debug("Is Configuration node [{}] [{}]", node.isConfiguration(), node
-                        .getQName().getLocalName());
+                _logger.debug("Is Configuration node [{}] [{}]", node.isConfiguration(), node.getQName().getLocalName());
 
                 List<Parameter> pathParams = new ArrayList<Parameter>();
-                String resourcePath = getDataStorePath("/config/", context) + m.getName() + ":";
-                addApis(node, apis, resourcePath, pathParams, true);
+                String resourcePath = getDataStorePath("/config/", context);
+                addApis(node, apis, resourcePath, pathParams, schemaContext, true);
 
                 pathParams = new ArrayList<Parameter>();
-                resourcePath = getDataStorePath("/operational/", context) + m.getName() + ":";
-                addApis(node, apis, resourcePath, pathParams, false);
+                resourcePath = getDataStorePath("/operational/", context);
+                addApis(node, apis, resourcePath, pathParams, schemaContext, false);
             }
 
             Set<RpcDefinition> rpcs = m.getRpcs();
             for (RpcDefinition rpcDefinition : rpcs) {
-                String resourcePath = getDataStorePath("/operations/", context) + m.getName() + ":";
-                addRpcs(rpcDefinition, apis, resourcePath);
+                String resourcePath = getDataStorePath("/operations/", context);
+                addRpcs(rpcDefinition, apis, resourcePath, schemaContext);
             }
         }
 
@@ -193,7 +185,7 @@ public class BaseYangSwaggerGenerator {
             JSONObject models = null;
 
             try {
-                models = jsonConverter.convertToJsonSchema(m);
+                models = jsonConverter.convertToJsonSchema(m, schemaContext);
                 doc.setModels(models);
                 if (_logger.isDebugEnabled()) {
                     _logger.debug(mapper.writeValueAsString(doc));
@@ -228,13 +220,13 @@ public class BaseYangSwaggerGenerator {
         return module + "(" + revision + ")";
     }
 
-    private void addApis(DataSchemaNode node, List<Api> apis, String parentPath,
-            List<Parameter> parentPathParams, boolean addConfigApi) {
+    private void addApis(DataSchemaNode node, List<Api> apis, String parentPath, List<Parameter> parentPathParams, SchemaContext schemaContext,
+            boolean addConfigApi) {
 
         Api api = new Api();
         List<Parameter> pathParams = new ArrayList<Parameter>(parentPathParams);
 
-        String resourcePath = parentPath + createPath(node, pathParams) + "/";
+        String resourcePath = parentPath + createPath(node, pathParams, schemaContext) + "/";
         _logger.debug("Adding path: [{}]", resourcePath);
         api.setPath(resourcePath);
         api.setOperations(operations(node, pathParams, addConfigApi));
@@ -248,7 +240,7 @@ public class BaseYangSwaggerGenerator {
                 if (childNode instanceof ListSchemaNode || childNode instanceof ContainerSchemaNode) {
                     // keep config and operation attributes separate.
                     if (childNode.isConfiguration() == addConfigApi) {
-                        addApis(childNode, apis, resourcePath, pathParams, addConfigApi);
+                        addApis(childNode, apis, resourcePath, pathParams, schemaContext, addConfigApi);
                     }
                 }
             }
@@ -261,8 +253,7 @@ public class BaseYangSwaggerGenerator {
      * @param pathParams
      * @return
      */
-    private List<Operation> operations(DataSchemaNode node, List<Parameter> pathParams,
-            boolean isConfig) {
+    private List<Operation> operations(DataSchemaNode node, List<Parameter> pathParams, boolean isConfig) {
         List<Operation> operations = new ArrayList<>();
 
         OperationBuilder.Get getBuilder = new OperationBuilder.Get(node, isConfig);
@@ -281,41 +272,37 @@ public class BaseYangSwaggerGenerator {
         return operations;
     }
 
-    private String createPath(final DataSchemaNode schemaNode, List<Parameter> pathParams) {
+    private String createPath(final DataSchemaNode schemaNode, List<Parameter> pathParams, SchemaContext schemaContext) {
         ArrayList<LeafSchemaNode> pathListParams = new ArrayList<LeafSchemaNode>();
         StringBuilder path = new StringBuilder();
-        QName _qName = schemaNode.getQName();
-        String localName = _qName.getLocalName();
+        String localName = resolvePathArgumentsName(schemaNode, schemaContext);
         path.append(localName);
 
         if ((schemaNode instanceof ListSchemaNode)) {
             final List<QName> listKeys = ((ListSchemaNode) schemaNode).getKeyDefinition();
             for (final QName listKey : listKeys) {
-                {
-                    DataSchemaNode _dataChildByName = ((DataNodeContainer) schemaNode)
-                            .getDataChildByName(listKey);
-                    pathListParams.add(((LeafSchemaNode) _dataChildByName));
-
-                    String pathParamIdentifier = new StringBuilder("/{")
-                            .append(listKey.getLocalName()).append("}").toString();
-                    path.append(pathParamIdentifier);
-
-                    Parameter pathParam = new Parameter();
-                    pathParam.setName(listKey.getLocalName());
-                    pathParam.setDescription(_dataChildByName.getDescription());
-                    pathParam.setType("string");
-                    pathParam.setParamType("path");
-
-                    pathParams.add(pathParam);
-                }
+                DataSchemaNode _dataChildByName = ((DataNodeContainer) schemaNode).getDataChildByName(listKey);
+                pathListParams.add(((LeafSchemaNode) _dataChildByName));
+
+                String pathParamIdentifier = new StringBuilder("/{").append(listKey.getLocalName()).append("}")
+                        .toString();
+                path.append(pathParamIdentifier);
+
+                Parameter pathParam = new Parameter();
+                pathParam.setName(listKey.getLocalName());
+                pathParam.setDescription(_dataChildByName.getDescription());
+                pathParam.setType("string");
+                pathParam.setParamType("path");
+
+                pathParams.add(pathParam);
             }
         }
         return path.toString();
     }
 
-    protected void addRpcs(RpcDefinition rpcDefn, List<Api> apis, String parentPath) {
+    protected void addRpcs(RpcDefinition rpcDefn, List<Api> apis, String parentPath, SchemaContext schemaContext) {
         Api rpc = new Api();
-        String resourcePath = parentPath + rpcDefn.getQName().getLocalName();
+        String resourcePath = parentPath + resolvePathArgumentsName(rpcDefn, schemaContext);
         rpc.setPath(resourcePath);
 
         Operation operationSpec = new Operation();
@@ -364,4 +351,5 @@ public class BaseYangSwaggerGenerator {
         }
         return sortedModules;
     }
+
 }
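getSwaggerDocSpec no longer appends the module-name prefix wholesale; each path segment now comes from resolvePathArgumentsName, and createPath expands list keys into path parameters. A rough sketch of how one resource path is assembled, with values borrowed from the toaster test models and otherwise hypothetical:

    // Per-node segments; a node from an augmenting module keeps that module's prefix.
    StringBuilder path = new StringBuilder("toaster2:toaster");
    path.append("/").append("toasterSlot").append("/{").append("slotId").append("}");
    path.append("/").append("toaster-augmented:slotInfo");
    String resourcePath = "/config/" + path + "/";
    // -> "/config/toaster2:toaster/toasterSlot/{slotId}/toaster-augmented:slotInfo/"
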
index 95bb1a094371db4f17d38c310c0043b07006c5c3..819892f6477b2994e53c927cf1ab3a8dd2c2b545 100644 (file)
@@ -7,6 +7,8 @@
  */
 package org.opendaylight.controller.sal.rest.doc.impl;
 
+import static org.opendaylight.controller.sal.rest.doc.util.RestDocgenUtil.resolveNodesName;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -19,6 +21,7 @@ import org.json.JSONArray;
 import org.json.JSONException;
 import org.json.JSONObject;
 import org.opendaylight.controller.sal.rest.doc.model.builder.OperationBuilder;
+import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.model.api.AnyXmlSchemaNode;
 import org.opendaylight.yangtools.yang.model.api.ChoiceCaseNode;
 import org.opendaylight.yangtools.yang.model.api.ChoiceNode;
@@ -31,6 +34,7 @@ import org.opendaylight.yangtools.yang.model.api.LeafSchemaNode;
 import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
 import org.opendaylight.yangtools.yang.model.api.Module;
 import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.opendaylight.yangtools.yang.model.api.TypeDefinition;
 import org.opendaylight.yangtools.yang.model.api.type.BinaryTypeDefinition;
 import org.opendaylight.yangtools.yang.model.api.type.BitsTypeDefinition;
@@ -86,8 +90,8 @@ public class ModelGenerator {
     private static final String NUMBER = "number";
     private static final String BOOLEAN = "boolean";
     private static final String STRING = "string";
-  private static final String ID_KEY = "id";
-  private static final String SUB_TYPES_KEY = "subTypes";
+    private static final String ID_KEY = "id";
+    private static final String SUB_TYPES_KEY = "subTypes";
 
     private static final Map<Class<? extends TypeDefinition<?>>, String> YANG_TYPE_TO_JSON_TYPE_MAPPING;
 
@@ -111,18 +115,21 @@ public class ModelGenerator {
         YANG_TYPE_TO_JSON_TYPE_MAPPING = Collections.unmodifiableMap(tempMap1);
     }
 
+    private Module topLevelModule;
+
     public ModelGenerator() {
     }
 
-    public JSONObject convertToJsonSchema(Module module) throws IOException, JSONException {
+    public JSONObject convertToJsonSchema(Module module, SchemaContext schemaContext) throws IOException, JSONException {
         JSONObject models = new JSONObject();
-        processContainers(module, models);
-        processRPCs(module, models);
-    processIdentities(module, models);
+        topLevelModule = module;
+        processContainers(module, models, schemaContext);
+        processRPCs(module, models, schemaContext);
+        processIdentities(module, models);
         return models;
     }
 
-    private void processContainers(Module module, JSONObject models) throws IOException,
+    private void processContainers(Module module, JSONObject models, SchemaContext schemaContext) throws IOException,
             JSONException {
 
         String moduleName = module.getName();
@@ -136,10 +143,10 @@ public class ModelGenerator {
              * For every container in the module
              */
             if (childNode instanceof ContainerSchemaNode) {
-                configModuleJSON = processContainer((ContainerSchemaNode) childNode, moduleName,
-                        true, models, true);
-                operationalModuleJSON = processContainer((ContainerSchemaNode) childNode,
-                        moduleName, true, models, false);
+                configModuleJSON = processContainer((ContainerSchemaNode) childNode, moduleName, true, models, true,
+                        schemaContext);
+                operationalModuleJSON = processContainer((ContainerSchemaNode) childNode, moduleName, true, models,
+                        false, schemaContext);
             }
 
             if (configModuleJSON != null) {
@@ -157,15 +164,15 @@ public class ModelGenerator {
     }
 
     /**
-     * Process the RPCs for a Module Spits out a file each of the name
-     * <rpcName>-input.json and <rpcName>-output.json for each RPC that contains
-     * input & output elements
+     * Processes the RPCs for a module. Spits out a file named <rpcName>-input.json and <rpcName>-output.json
+     * for each RPC that contains input & output elements.
      *
      * @param module
      * @throws JSONException
      * @throws IOException
      */
-    private void processRPCs(Module module, JSONObject models) throws JSONException, IOException {
+    private void processRPCs(Module module, JSONObject models, SchemaContext schemaContext) throws JSONException,
+            IOException {
 
         Set<RpcDefinition> rpcs = module.getRpcs();
         String moduleName = module.getName();
@@ -173,7 +180,7 @@ public class ModelGenerator {
 
             ContainerSchemaNode input = rpc.getInput();
             if (input != null) {
-                JSONObject inputJSON = processContainer(input, moduleName, true, models);
+                JSONObject inputJSON = processContainer(input, moduleName, true, models, schemaContext);
                 String filename = "(" + rpc.getQName().getLocalName() + ")input";
                 inputJSON.put("id", filename);
                 // writeToFile(filename, inputJSON.toString(2), moduleName);
@@ -182,7 +189,7 @@ public class ModelGenerator {
 
             ContainerSchemaNode output = rpc.getOutput();
             if (output != null) {
-                JSONObject outputJSON = processContainer(output, moduleName, true, models);
+                JSONObject outputJSON = processContainer(output, moduleName, true, models, schemaContext);
                 String filename = "(" + rpc.getQName().getLocalName() + ")output";
                 outputJSON.put("id", filename);
                 models.put(filename, outputJSON);
@@ -190,58 +197,59 @@ public class ModelGenerator {
         }
     }
 
-  /**
-   * Processes the 'identity' statement in a yang model
-   * and maps it to a 'model' in the Swagger JSON spec.
-   *
-   * @param module The module from which the identity stmt will be processed
-   * @param models The JSONObject in which the parsed identity will be put as a 'model' obj
-   * @throws JSONException
-   */
-  private void processIdentities(Module module, JSONObject models) throws JSONException {
-
-    String moduleName = module.getName();
-    Set<IdentitySchemaNode> idNodes =  module.getIdentities();
-    _logger.debug("Processing Identities for module {} . Found {} identity statements", moduleName, idNodes.size());
-
-    for(IdentitySchemaNode idNode : idNodes){
-      JSONObject identityObj=new JSONObject();
-      String identityName = idNode.getQName().getLocalName();
-      _logger.debug("Processing Identity: {}", identityName);
-
-      identityObj.put(ID_KEY, identityName);
-      identityObj.put(DESCRIPTION_KEY, idNode.getDescription());
-
-      JSONObject props = new JSONObject();
-      IdentitySchemaNode baseId = idNode.getBaseIdentity();
+    /**
+     * Processes the 'identity' statement in a yang model and maps it to a 'model' in the Swagger JSON spec.
+     *
+     * @param module
+     *            The module from which the identity stmt will be processed
+     * @param models
+     *            The JSONObject in which the parsed identity will be put as a 'model' obj
+     * @throws JSONException
+     */
+    private void processIdentities(Module module, JSONObject models) throws JSONException {
 
+        String moduleName = module.getName();
+        Set<IdentitySchemaNode> idNodes = module.getIdentities();
+        _logger.debug("Processing Identities for module {} . Found {} identity statements", moduleName, idNodes.size());
+
+        for (IdentitySchemaNode idNode : idNodes) {
+            JSONObject identityObj = new JSONObject();
+            String identityName = idNode.getQName().getLocalName();
+            _logger.debug("Processing Identity: {}", identityName);
+
+            identityObj.put(ID_KEY, identityName);
+            identityObj.put(DESCRIPTION_KEY, idNode.getDescription());
+
+            JSONObject props = new JSONObject();
+            IdentitySchemaNode baseId = idNode.getBaseIdentity();
+
+            if (baseId == null) {
+                /**
+                 * This is a base identity. So lets see if it has sub types. If it does, then add them to the model
+                 * definition.
+                 */
+                Set<IdentitySchemaNode> derivedIds = idNode.getDerivedIdentities();
+
+                if (derivedIds != null) {
+                    JSONArray subTypes = new JSONArray();
+                    for (IdentitySchemaNode derivedId : derivedIds) {
+                        subTypes.put(derivedId.getQName().getLocalName());
+                    }
+                    identityObj.put(SUB_TYPES_KEY, subTypes);
+                }
+            } else {
+                /**
+                 * This is a derived entity. Add its base type & move on.
+                 */
+                props.put(TYPE_KEY, baseId.getQName().getLocalName());
+            }
 
-      if(baseId==null) {
-        /**
-         * This is a base identity. So lets see if
-         * it has sub types. If it does, then add them to the model definition.
-         */
-        Set<IdentitySchemaNode> derivedIds = idNode.getDerivedIdentities();
-
-        if(derivedIds != null) {
-          JSONArray subTypes = new JSONArray();
-          for(IdentitySchemaNode derivedId : derivedIds){
-            subTypes.put(derivedId.getQName().getLocalName());
-          }
-          identityObj.put(SUB_TYPES_KEY, subTypes);
+            // Add the properties. For a base type, this will be an empty object as required by the Swagger spec.
+            identityObj.put(PROPERTIES_KEY, props);
+            models.put(identityName, identityObj);
         }
-      } else {
-        /**
-         * This is a derived entity. Add it's base type & move on.
-         */
-        props.put(TYPE_KEY, baseId.getQName().getLocalName());
-      }
-
-      //Add the properties. For a base type, this will be an empty object as required by the Swagger spec.
-      identityObj.put(PROPERTIES_KEY, props);
-      models.put(identityName, identityObj);
     }
-  }
+
     /**
      * Processes the container node and populates the moduleJSON
      *
@@ -251,14 +259,13 @@ public class ModelGenerator {
      * @throws JSONException
      * @throws IOException
      */
-    private JSONObject processContainer(ContainerSchemaNode container, String moduleName,
-            boolean addSchemaStmt, JSONObject models) throws JSONException, IOException {
-        return processContainer(container, moduleName, addSchemaStmt, models, (Boolean) null);
+    private JSONObject processContainer(ContainerSchemaNode container, String moduleName, boolean addSchemaStmt,
+            JSONObject models, SchemaContext schemaContext) throws JSONException, IOException {
+        return processContainer(container, moduleName, addSchemaStmt, models, (Boolean) null, schemaContext);
     }
 
-    private JSONObject processContainer(ContainerSchemaNode container, String moduleName,
-            boolean addSchemaStmt, JSONObject models, Boolean isConfig) throws JSONException,
-            IOException {
+    private JSONObject processContainer(ContainerSchemaNode container, String moduleName, boolean addSchemaStmt,
+            JSONObject models, Boolean isConfig, SchemaContext schemaContext) throws JSONException, IOException {
         JSONObject moduleJSON = getSchemaTemplate();
         if (addSchemaStmt) {
             moduleJSON = getSchemaTemplate();
@@ -270,57 +277,58 @@ public class ModelGenerator {
         String containerDescription = container.getDescription();
         moduleJSON.put(DESCRIPTION_KEY, containerDescription);
 
-        JSONObject properties = processChildren(container.getChildNodes(), moduleName, models, isConfig);
+        JSONObject properties = processChildren(container.getChildNodes(), container.getQName(), moduleName, models,
+                isConfig, schemaContext);
         moduleJSON.put(PROPERTIES_KEY, properties);
         return moduleJSON;
     }
 
-    private JSONObject processChildren(Iterable<DataSchemaNode> nodes, String moduleName,
-            JSONObject models) throws JSONException, IOException {
-        return processChildren(nodes, moduleName, models, null);
+    private JSONObject processChildren(Iterable<DataSchemaNode> nodes, QName parentQName, String moduleName,
+            JSONObject models, SchemaContext schemaContext) throws JSONException, IOException {
+        return processChildren(nodes, parentQName, moduleName, models, null, schemaContext);
     }
 
     /**
      * Processes the nodes
      *
      * @param nodes
+     * @param parentQName
      * @param moduleName
      * @param isConfig
      * @return
      * @throws JSONException
      * @throws IOException
      */
-    private JSONObject processChildren(Iterable<DataSchemaNode> nodes, String moduleName,
-            JSONObject models, Boolean isConfig) throws JSONException, IOException {
+    private JSONObject processChildren(Iterable<DataSchemaNode> nodes, QName parentQName, String moduleName,
+            JSONObject models, Boolean isConfig, SchemaContext schemaContext) throws JSONException, IOException {
 
         JSONObject properties = new JSONObject();
 
         for (DataSchemaNode node : nodes) {
             if (isConfig == null || node.isConfiguration() == isConfig) {
 
-                String name = node.getQName().getLocalName();
+                String name = resolveNodesName(node, topLevelModule, schemaContext);
                 JSONObject property = null;
                 if (node instanceof LeafSchemaNode) {
                     property = processLeafNode((LeafSchemaNode) node);
                 } else if (node instanceof ListSchemaNode) {
-                    property = processListSchemaNode((ListSchemaNode) node, moduleName, models, isConfig);
+                    property = processListSchemaNode((ListSchemaNode) node, moduleName, models, isConfig, schemaContext);
 
                 } else if (node instanceof LeafListSchemaNode) {
                     property = processLeafListNode((LeafListSchemaNode) node);
 
                 } else if (node instanceof ChoiceNode) {
-                    property = processChoiceNode((ChoiceNode) node, moduleName, models);
+                    property = processChoiceNode((ChoiceNode) node, moduleName, models, schemaContext);
 
                 } else if (node instanceof AnyXmlSchemaNode) {
                     property = processAnyXMLNode((AnyXmlSchemaNode) node);
 
                 } else if (node instanceof ContainerSchemaNode) {
-                    property = processContainer((ContainerSchemaNode) node, moduleName, false,
-                            models, isConfig);
+                    property = processContainer((ContainerSchemaNode) node, moduleName, false, models, isConfig,
+                            schemaContext);
 
                 } else {
-                    throw new IllegalArgumentException("Unknown DataSchemaNode type: "
-                            + node.getClass());
+                    throw new IllegalArgumentException("Unknown DataSchemaNode type: " + node.getClass());
                 }
 
                 property.putOpt(DESCRIPTION_KEY, node.getDescription());
@@ -356,15 +364,16 @@ public class ModelGenerator {
      * @throws JSONException
      * @throws IOException
      */
-    private JSONObject processChoiceNode(ChoiceNode choiceNode, String moduleName, JSONObject models)
-            throws JSONException, IOException {
+    private JSONObject processChoiceNode(ChoiceNode choiceNode, String moduleName, JSONObject models,
+            SchemaContext schemaContext) throws JSONException, IOException {
 
         Set<ChoiceCaseNode> cases = choiceNode.getCases();
 
         JSONArray choiceProps = new JSONArray();
         for (ChoiceCaseNode choiceCase : cases) {
             String choiceName = choiceCase.getQName().getLocalName();
-            JSONObject choiceProp = processChildren(choiceCase.getChildNodes(), moduleName, models);
+            JSONObject choiceProp = processChildren(choiceCase.getChildNodes(), choiceCase.getQName(), moduleName,
+                    models, schemaContext);
             JSONObject choiceObj = new JSONObject();
             choiceObj.put(choiceName, choiceProp);
             choiceObj.put(TYPE_KEY, OBJECT_TYPE);
@@ -384,8 +393,7 @@ public class ModelGenerator {
      * @param props
      * @throws JSONException
      */
-    private void processConstraints(ConstraintDefinition constraints, JSONObject props)
-            throws JSONException {
+    private void processConstraints(ConstraintDefinition constraints, JSONObject props) throws JSONException {
         boolean isMandatory = constraints.isMandatory();
         props.put(REQUIRED_KEY, isMandatory);
 
@@ -402,9 +410,8 @@ public class ModelGenerator {
     /**
      * Parses a ListSchema node.
      *
-     * Due to a limitation of the RAML--->JAX-RS tool, sub-properties must be in
-     * a separate JSON schema file. Hence, we have to write some properties to a
-     * new file, while continuing to process the rest.
+     * Due to a limitation of the RAML--->JAX-RS tool, sub-properties must be in a separate JSON schema file. Hence, we
+     * have to write some properties to a new file, while continuing to process the rest.
      *
      * @param listNode
      * @param moduleName
@@ -413,21 +420,21 @@ public class ModelGenerator {
      * @throws JSONException
      * @throws IOException
      */
-    private JSONObject processListSchemaNode(ListSchemaNode listNode, String moduleName,
-            JSONObject models, Boolean isConfig) throws JSONException, IOException {
+    private JSONObject processListSchemaNode(ListSchemaNode listNode, String moduleName, JSONObject models,
+            Boolean isConfig, SchemaContext schemaContext) throws JSONException, IOException {
 
-        String fileName = (BooleanUtils.isNotFalse(isConfig)?OperationBuilder.CONFIG:OperationBuilder.OPERATIONAL) +
-                                                                listNode.getQName().getLocalName();
+        String fileName = (BooleanUtils.isNotFalse(isConfig) ? OperationBuilder.CONFIG : OperationBuilder.OPERATIONAL)
+                + listNode.getQName().getLocalName();
 
-        JSONObject childSchemaProperties = processChildren(listNode.getChildNodes(), moduleName, models);
+        JSONObject childSchemaProperties = processChildren(listNode.getChildNodes(), listNode.getQName(), moduleName,
+                models, schemaContext);
         JSONObject childSchema = getSchemaTemplate();
         childSchema.put(TYPE_KEY, OBJECT_TYPE);
         childSchema.put(PROPERTIES_KEY, childSchemaProperties);
 
         /*
-         * Due to a limitation of the RAML--->JAX-RS tool, sub-properties must
-         * be in a separate JSON schema file. Hence, we have to write some
-         * properties to a new file, while continuing to process the rest.
+         * Due to a limitation of the RAML--->JAX-RS tool, sub-properties must be in a separate JSON schema file. Hence,
+         * we have to write some properties to a new file, while continuing to process the rest.
          */
         // writeToFile(fileName, childSchema.toString(2), moduleName);
         childSchema.put("id", fileName);
@@ -483,8 +490,7 @@ public class ModelGenerator {
      * @param property
      * @throws JSONException
      */
-    private void processTypeDef(TypeDefinition<?> leafTypeDef, JSONObject property)
-            throws JSONException {
+    private void processTypeDef(TypeDefinition<?> leafTypeDef, JSONObject property) throws JSONException {
 
         if (leafTypeDef instanceof ExtendedType) {
             processExtendedType(leafTypeDef, property);
@@ -498,7 +504,7 @@ public class ModelGenerator {
             processUnionType((UnionTypeDefinition) leafTypeDef, property);
 
         } else if (leafTypeDef instanceof IdentityrefTypeDefinition) {
-      property.putOpt(TYPE_KEY, ((IdentityrefTypeDefinition) leafTypeDef).getIdentity().getQName().getLocalName());
+            property.putOpt(TYPE_KEY, ((IdentityrefTypeDefinition) leafTypeDef).getIdentity().getQName().getLocalName());
         } else if (leafTypeDef instanceof BinaryTypeDefinition) {
             processBinaryType((BinaryTypeDefinition) leafTypeDef, property);
         } else {
@@ -517,15 +523,13 @@ public class ModelGenerator {
      * @param property
      * @throws JSONException
      */
-    private void processExtendedType(TypeDefinition<?> leafTypeDef, JSONObject property)
-            throws JSONException {
+    private void processExtendedType(TypeDefinition<?> leafTypeDef, JSONObject property) throws JSONException {
         Object leafBaseType = leafTypeDef.getBaseType();
         if (leafBaseType instanceof ExtendedType) {
             // recursively process an extended type until we hit a base type
             processExtendedType((TypeDefinition<?>) leafBaseType, property);
         } else {
-            List<LengthConstraint> lengthConstraints = ((ExtendedType) leafTypeDef)
-                    .getLengthConstraints();
+            List<LengthConstraint> lengthConstraints = ((ExtendedType) leafTypeDef).getLengthConstraints();
             for (LengthConstraint lengthConstraint : lengthConstraints) {
                 Number min = lengthConstraint.getMin();
                 Number max = lengthConstraint.getMax();
@@ -541,8 +545,7 @@ public class ModelGenerator {
     /*
    *
    */
-    private void processBinaryType(BinaryTypeDefinition binaryType, JSONObject property)
-            throws JSONException {
+    private void processBinaryType(BinaryTypeDefinition binaryType, JSONObject property) throws JSONException {
         property.put(TYPE_KEY, STRING);
         JSONObject media = new JSONObject();
         media.put(BINARY_ENCODING_KEY, BASE_64);
@@ -555,8 +558,7 @@ public class ModelGenerator {
      * @param property
      * @throws JSONException
      */
-    private void processEnumType(EnumerationType enumLeafType, JSONObject property)
-            throws JSONException {
+    private void processEnumType(EnumerationType enumLeafType, JSONObject property) throws JSONException {
         List<EnumPair> enumPairs = enumLeafType.getValues();
         List<String> enumNames = new ArrayList<String>();
         for (EnumPair enumPair : enumPairs) {
@@ -571,8 +573,7 @@ public class ModelGenerator {
      * @param property
      * @throws JSONException
      */
-    private void processBitsType(BitsTypeDefinition bitsType, JSONObject property)
-            throws JSONException {
+    private void processBitsType(BitsTypeDefinition bitsType, JSONObject property) throws JSONException {
         property.put(TYPE_KEY, ARRAY_TYPE);
         property.put(MIN_ITEMS, 0);
         property.put(UNIQUE_ITEMS_KEY, true);
@@ -593,18 +594,17 @@ public class ModelGenerator {
      * @param property
      * @throws JSONException
      */
-    private void processUnionType(UnionTypeDefinition unionType, JSONObject property)
-            throws JSONException {
+    private void processUnionType(UnionTypeDefinition unionType, JSONObject property) throws JSONException {
 
         StringBuilder type = new StringBuilder();
-        for (TypeDefinition<?> typeDef : unionType.getTypes() ) {
-            if( type.length() > 0 ){
-                type.append( " or " );
+        for (TypeDefinition<?> typeDef : unionType.getTypes()) {
+            if (type.length() > 0) {
+                type.append(" or ");
             }
             type.append(YANG_TYPE_TO_JSON_TYPE_MAPPING.get(typeDef.getClass()));
         }
 
-        property.put(TYPE_KEY, type );
+        property.put(TYPE_KEY, type);
     }
 
     /**
@@ -619,4 +619,5 @@ public class ModelGenerator {
 
         return schemaJSON;
     }
+
 }
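processIdentities maps every YANG identity to a Swagger model: a base identity lists its derived identities under subTypes, a derived identity records its base under type, and properties is always present (empty for a base identity, as the Swagger spec requires). A small sketch of the resulting org.json objects, using the toaster identity names purely as illustration:

    JSONObject models = new JSONObject();
    // Base identity with two derived identities.
    JSONObject base = new JSONObject();
    base.put("id", "toast-type");
    base.put("subTypes", new JSONArray(Arrays.asList("white-bread", "wheat-bread")));
    base.put("properties", new JSONObject());
    models.put("toast-type", base);
    // Derived identity pointing back at its base.
    JSONObject derived = new JSONObject();
    derived.put("id", "white-bread");
    derived.put("properties", new JSONObject().put("type", "toast-type"));
    models.put("white-bread", derived);
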
index 29ada12c6f8737a3d61fc5e68abde7cece17fad1..7e8707110fb757e9fae69fb2e02837d34d5b22cf 100644 (file)
@@ -17,9 +17,7 @@ import java.util.Map.Entry;
 import java.util.TreeMap;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
-
 import javax.ws.rs.core.UriInfo;
-
 import org.opendaylight.controller.sal.core.api.model.SchemaService;
 import org.opendaylight.controller.sal.core.api.mount.MountProvisionInstance;
 import org.opendaylight.controller.sal.core.api.mount.MountProvisionService;
diff --git a/opendaylight/md-sal/sal-rest-docgen/src/main/java/org/opendaylight/controller/sal/rest/doc/util/RestDocgenUtil.java b/opendaylight/md-sal/sal-rest-docgen/src/main/java/org/opendaylight/controller/sal/rest/doc/util/RestDocgenUtil.java
new file mode 100644 (file)
index 0000000..9e1d82a
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2014 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.rest.doc.util;
+
+import java.net.URI;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import org.opendaylight.yangtools.yang.common.QName;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaNode;
+
+public class RestDocgenUtil {
+
+    private RestDocgenUtil() {
+    }
+
+    private static Map<URI, Map<Date, Module>> namespaceAndRevisionToModule = new HashMap<URI, Map<Date, Module>>();
+
+    /**
+     * Resolve path argument name for {@code node}.
+     *
+     * The name can also contain a prefix consisting of the module name followed by a colon. The module prefix is present
+     * if the namespace of {@code node} differs from that of its parent; otherwise only the local name of {@code node} is returned.
+     *
+     * @return name of {@code node}
+     */
+    public static String resolvePathArgumentsName(final SchemaNode node, final SchemaContext schemaContext) {
+        Iterable<QName> schemaPath = node.getPath().getPathTowardsRoot();
+        Iterator<QName> it = schemaPath.iterator();
+        QName nodeQName = it.next();
+
+        QName parentQName = null;
+        if (it.hasNext()) {
+            parentQName = it.next();
+        }
+        if (isEqualNamespaceAndRevision(parentQName, nodeQName)) {
+            return node.getQName().getLocalName();
+        } else {
+            return resolveFullNameFromNode(node, schemaContext);
+        }
+    }
+
+    private synchronized static String resolveFullNameFromNode(final SchemaNode node, final SchemaContext schemaContext) {
+        final URI namespace = node.getQName().getNamespace();
+        final Date revision = node.getQName().getRevision();
+
+        Map<Date, Module> revisionToModule = namespaceAndRevisionToModule.get(namespace);
+        if (revisionToModule == null) {
+            revisionToModule = new HashMap<>();
+            namespaceAndRevisionToModule.put(namespace, revisionToModule);
+        }
+        Module module = revisionToModule.get(revision);
+        if (module == null) {
+            module = schemaContext.findModuleByNamespaceAndRevision(namespace, revision);
+            revisionToModule.put(revision, module);
+        }
+        if (module != null) {
+            return module.getName() + ":" + node.getQName().getLocalName();
+        }
+        return node.getQName().getLocalName();
+    }
+
+    public static String resolveNodesName(final SchemaNode node, final Module module, final SchemaContext schemaContext) {
+        if (node.getQName().getNamespace().equals(module.getQNameModule().getNamespace())
+                && node.getQName().getRevision().equals(module.getQNameModule().getRevision())) {
+            return node.getQName().getLocalName();
+        } else {
+            return resolveFullNameFromNode(node, schemaContext);
+        }
+    }
+
+    private static boolean isEqualNamespaceAndRevision(QName parentQName, QName nodeQName) {
+        if (parentQName == null) {
+            if (nodeQName == null) {
+                return true;
+            }
+            return false;
+        }
+        return parentQName.getNamespace().equals(nodeQName.getNamespace())
+                && parentQName.getRevision().equals(nodeQName.getRevision());
+    }
+}
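A brief usage sketch of the helper, assuming slotInfoNode is the augmenting container from toaster_augmented.yang, toasterSlotNode is the list defined in toaster2 itself, and schemaContext contains both modules (all three variables hypothetical):

    // An augmenting node gets its module prefix, a same-module node does not.
    String augmented = RestDocgenUtil.resolvePathArgumentsName(slotInfoNode, schemaContext);   // "toaster-augmented:slotInfo"
    String plain = RestDocgenUtil.resolvePathArgumentsName(toasterSlotNode, schemaContext);    // "toasterSlot"
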
index 07c9378439d2f66551672d8a08ec20785a0a074b..19f82b53867b603af20576759485999ca1fe28c4 100644 (file)
@@ -5,16 +5,17 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import com.google.common.base.Preconditions;
 import java.io.File;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeSet;
-
 import javax.ws.rs.core.UriInfo;
-
 import junit.framework.Assert;
-
+import org.json.JSONException;
+import org.json.JSONObject;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -25,8 +26,8 @@ import org.opendaylight.controller.sal.rest.doc.swagger.Operation;
 import org.opendaylight.controller.sal.rest.doc.swagger.Resource;
 import org.opendaylight.controller.sal.rest.doc.swagger.ResourceList;
 import org.opendaylight.yangtools.yang.model.api.Module;
-
-import com.google.common.base.Preconditions;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
 
 /**
  *
@@ -36,12 +37,14 @@ public class ApiDocGeneratorTest {
     public static final String HTTP_HOST = "http://host";
     private ApiDocGenerator generator;
     private DocGenTestHelper helper;
+    private SchemaContext schemaContext;
 
     @Before
     public void setUp() throws Exception {
         generator = new ApiDocGenerator();
         helper = new DocGenTestHelper();
         helper.setUp();
+        schemaContext = new YangParserImpl().resolveSchemaContext(new HashSet<Module>(helper.getModules().values()));
     }
 
     @After
@@ -59,8 +62,9 @@ public class ApiDocGeneratorTest {
         for (Entry<File, Module> m : helper.getModules().entrySet()) {
             if (m.getKey().getAbsolutePath().endsWith("toaster_short.yang")) {
                 ApiDeclaration doc = generator.getSwaggerDocSpec(m.getValue(),
-                        "http://localhost:8080/restconf", "");
+                        "http://localhost:8080/restconf", "",schemaContext);
                 validateToaster(doc);
+                validateTosterDocContainsModulePrefixes(doc);
                 Assert.assertNotNull(doc);
             }
         }
@@ -73,7 +77,7 @@ public class ApiDocGeneratorTest {
         for (Entry<File, Module> m : helper.getModules().entrySet()) {
             if (m.getKey().getAbsolutePath().endsWith("toaster.yang")) {
                 ApiDeclaration doc = generator.getSwaggerDocSpec(m.getValue(),
-                        "http://localhost:8080/restconf", "");
+                        "http://localhost:8080/restconf", "",schemaContext);
                 Assert.assertNotNull(doc);
 
                 //testing bugs.opendaylight.org bug 1290. UnionType model type.
@@ -84,11 +88,21 @@ public class ApiDocGeneratorTest {
         }
     }
 
+    /**
+     * Tests whether all required paths for the HTTP operations (GET, DELETE, PUT, POST) are generated from the yang files.
+     *
+     * If a container or list is augmented, the path should contain the augmenting module's name followed by a colon (e.g.
+     * "/config/module1:element1/element2/module2:element3").
+     *
+     * @param doc
+     * @throws Exception
+     */
     private void validateToaster(ApiDeclaration doc) throws Exception {
         Set<String> expectedUrls = new TreeSet<>(Arrays.asList(new String[] {
                 "/config/toaster2:toaster/", "/operational/toaster2:toaster/",
                 "/operations/toaster2:cancel-toast", "/operations/toaster2:make-toast",
-                "/operations/toaster2:restock-toaster" }));
+                "/operations/toaster2:restock-toaster",
+                "/config/toaster2:toaster/toasterSlot/{slotId}/toaster-augmented:slotInfo/" }));
 
         Set<String> actualUrls = new TreeSet<>();
 
@@ -130,7 +144,7 @@ public class ApiDocGeneratorTest {
     @Test
     public void testGetResourceListing() throws Exception {
         UriInfo info = helper.createMockUriInfo(HTTP_HOST);
-        SchemaService mockSchemaService = helper.createMockSchemaService();
+        SchemaService mockSchemaService = helper.createMockSchemaService(schemaContext);
 
         generator.setSchemaService(mockSchemaService);
 
@@ -154,4 +168,30 @@ public class ApiDocGeneratorTest {
         assertEquals(HTTP_HOST + "/toaster2(2009-11-20)", toaster2.getPath());
     }
 
+    private void validateTosterDocContainsModulePrefixes(ApiDeclaration doc) {
+        JSONObject topLevelJson = doc.getModels();
+        try {
+            JSONObject configToaster = topLevelJson.getJSONObject("(config)toaster");
+            assertNotNull("(config)toaster JSON object missing", configToaster);
+            //without module prefix
+            containsProperties(configToaster, "toasterSlot");
+
+            JSONObject toasterSlot = topLevelJson.getJSONObject("(config)toasterSlot");
+            assertNotNull("(config)toasterSlot JSON object missing", toasterSlot);
+            //with module prefix
+            containsProperties(toasterSlot, "toaster-augmented:slotInfo");
+
+        } catch (JSONException e) {
+            fail("Json exception while reading JSON object. Original message "+e.getMessage());
+        }
+    }
+
+    private void containsProperties(final JSONObject jsonObject, final String... properties) throws JSONException {
+        for (String property : properties) {
+            JSONObject propertiesObject = jsonObject.getJSONObject("properties");
+            assertNotNull("Properties object missing", propertiesObject);
+            JSONObject concretePropertyObject = propertiesObject.getJSONObject(property);
+            assertNotNull(property + " is missing", concretePropertyObject);
+        }
+    }
 }
index 0f15d00e79f53ecb3815d915b55454274dff7c93..7701d2a735e1af1304139e383b556ee8f8a53492 100644 (file)
@@ -10,6 +10,9 @@ package org.opendaylight.controller.sal.rest.doc.impl;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+import com.fasterxml.jackson.datatype.jsonorg.JsonOrgModule;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.net.URI;
@@ -19,23 +22,17 @@ import java.util.Date;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-
 import javax.ws.rs.core.UriBuilder;
 import javax.ws.rs.core.UriInfo;
-
 import org.mockito.ArgumentCaptor;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 import org.opendaylight.controller.sal.core.api.model.SchemaService;
 import org.opendaylight.yangtools.yang.model.api.Module;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
-import org.opendaylight.yangtools.yang.model.parser.api.YangModelParser;
+import org.opendaylight.yangtools.yang.model.parser.api.YangContextParser;
 import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
 
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.SerializationFeature;
-import com.fasterxml.jackson.datatype.jsonorg.JsonOrgModule;
-
 public class DocGenTestHelper {
 
     private Map<File, Module> modules;
@@ -45,7 +42,7 @@ public class DocGenTestHelper {
             URISyntaxException {
 
         URI resourceDirUri = getClass().getResource(resourceDirectory).toURI();
-        final YangModelParser parser = new YangParserImpl();
+        final YangContextParser parser = new YangParserImpl();
         final File testDir = new File(resourceDirUri);
         final String[] fileList = testDir.list();
         final List<File> testFiles = new ArrayList<>();
@@ -90,6 +87,7 @@ public class DocGenTestHelper {
 
         final ArgumentCaptor<String> moduleCapture = ArgumentCaptor.forClass(String.class);
         final ArgumentCaptor<Date> dateCapture = ArgumentCaptor.forClass(Date.class);
+        final ArgumentCaptor<URI> namespaceCapture = ArgumentCaptor.forClass(URI.class);
         when(mockContext.findModuleByName(moduleCapture.capture(), dateCapture.capture())).then(
                 new Answer<Module>() {
                     @Override
@@ -104,6 +102,20 @@ public class DocGenTestHelper {
                         return null;
                     }
                 });
+        when(mockContext.findModuleByNamespaceAndRevision(namespaceCapture.capture(), dateCapture.capture())).then(
+                new Answer<Module>() {
+                    @Override
+                    public Module answer(InvocationOnMock invocation) throws Throwable {
+                        URI namespace = namespaceCapture.getValue();
+                        Date date = dateCapture.getValue();
+                        for (Module m : modules.values()) {
+                            if (m.getNamespace().equals(namespace) && m.getRevision().equals(date)) {
+                                return m;
+                            }
+                        }
+                        return null;
+                    }
+                });
         return mockContext;
     }
 
index bba8ed9ca6fcf557bb972af2d3c55c68bf1f3f13..940b99fd997fe919cf889c8f4902a5c748592636 100644 (file)
@@ -14,12 +14,11 @@ import static org.mockito.Mockito.when;
 
 import java.net.URISyntaxException;
 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
-
 import javax.ws.rs.core.UriInfo;
-
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.sal.core.api.model.SchemaService;
@@ -33,7 +32,9 @@ import org.opendaylight.controller.sal.rest.doc.swagger.Resource;
 import org.opendaylight.controller.sal.rest.doc.swagger.ResourceList;
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.model.api.Module;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
 
 public class MountPointSwaggerTest {
 
@@ -44,12 +45,14 @@ public class MountPointSwaggerTest {
     private static final String INSTANCE_URL = "nodes/node/123/";
     private MountPointSwagger swagger;
     private DocGenTestHelper helper;
+    private SchemaContext schemaContext;
 
     @Before
     public void setUp() throws Exception {
         swagger = new MountPointSwagger();
         helper = new DocGenTestHelper();
         helper.setUp();
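+        // resolve a full SchemaContext from the helper's parsed test modules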
+        schemaContext = new YangParserImpl().resolveSchemaContext(new HashSet<Module>(helper.getModules().values()));
     }
 
     @Test()
diff --git a/opendaylight/md-sal/sal-rest-docgen/src/test/resources/yang/toaster_augmented.yang b/opendaylight/md-sal/sal-rest-docgen/src/test/resources/yang/toaster_augmented.yang
new file mode 100644 (file)
index 0000000..4db7897
--- /dev/null
@@ -0,0 +1,21 @@
+module toaster-augmented {
+
+    yang-version 1;
+
+    namespace
+      "http://netconfcentral.org/ns/toaster/augmented";
+
+    prefix toast;
+    import toaster2 {prefix tst; revision-date 2009-11-20;}
+
+    revision "2014-7-14" {
+    }
+
+    augment "/tst:toaster/tst:toasterSlot" {
+        container slotInfo {
+            leaf numberOfToastPrepared {
+                type uint32;
+            }
+        }
+    }
+}
\ No newline at end of file
index a1d5ab0a129fa92d362b4faa7d1e416fd367b7ab..6884076d5daafa1a8ced3753d1460754b950e1a1 100644 (file)
                 Microsoft Toaster.";
       }
 
+      list toasterSlot {
+        key "slotId";
+        leaf slotId {
+            type string;
+        }
+      }
+
       leaf toasterModelNumber {
         type DisplayString;
         config false;
index e01a0d5dcba4956f790d2274462455911970742c..97ed15df191d8159300e5754aa111d2c0ee8ea51 100644 (file)
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal</artifactId>
+      <artifactId>sal-binding-api</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller</groupId>
-      <artifactId>sal-binding-api</artifactId>
+      <artifactId>liblldp</artifactId>
     </dependency>
     <dependency>
       <groupId>org.opendaylight.controller.model</groupId>
index 82ab4432462af79fbbc6fe611f06175fa5cb93e9..0d1ba11ee12a991d4806a90c56b2150832af2378 100644 (file)
@@ -9,10 +9,10 @@ package org.opendaylight.md.controller.topology.lldp.utils;
 
 import java.nio.charset.Charset;
 
-import org.opendaylight.controller.sal.packet.Ethernet;
-import org.opendaylight.controller.sal.packet.LLDP;
-import org.opendaylight.controller.sal.packet.LLDPTLV;
-import org.opendaylight.controller.sal.utils.NetUtils;
+import org.opendaylight.controller.liblldp.Ethernet;
+import org.opendaylight.controller.liblldp.LLDP;
+import org.opendaylight.controller.liblldp.LLDPTLV;
+import org.opendaylight.controller.liblldp.NetUtils;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorId;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeConnectorRef;
 import org.opendaylight.yang.gen.v1.urn.opendaylight.inventory.rev130819.NodeId;
@@ -10,9 +10,6 @@ package org.opendaylight.controller.netconf.persist.impl;
 
 import static com.google.common.base.Preconditions.checkNotNull;
 
-import com.google.common.base.Function;
-import com.google.common.base.Stopwatch;
-import com.google.common.collect.Collections2;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.Collection;
@@ -23,10 +20,17 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeMap;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
+
 import javax.annotation.concurrent.Immutable;
+import javax.management.MBeanServerConnection;
+
 import org.opendaylight.controller.config.api.ConflictingVersionException;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
 import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
+import org.opendaylight.controller.config.persist.api.Persister;
 import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
 import org.opendaylight.controller.netconf.api.NetconfMessage;
 import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
@@ -45,22 +49,60 @@ import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.xml.sax.SAXException;
 
+import com.google.common.base.Function;
+import com.google.common.base.Stopwatch;
+import com.google.common.collect.Collections2;
+
 @Immutable
-public class ConfigPusher {
-    private static final Logger logger = LoggerFactory.getLogger(ConfigPusher.class);
+public class ConfigPusherImpl implements ConfigPusher {
+    private static final Logger logger = LoggerFactory.getLogger(ConfigPusherImpl.class);
 
     private final long maxWaitForCapabilitiesMillis;
     private final long conflictingVersionTimeoutMillis;
     private final NetconfOperationServiceFactory configNetconfConnector;
+    private static final int QUEUE_SIZE = 100;
+    private BlockingQueue<List<? extends ConfigSnapshotHolder>> queue = new LinkedBlockingQueue<List<? extends ConfigSnapshotHolder>>(QUEUE_SIZE);
 
-    public ConfigPusher(NetconfOperationServiceFactory configNetconfConnector, long maxWaitForCapabilitiesMillis,
+    public ConfigPusherImpl(NetconfOperationServiceFactory configNetconfConnector, long maxWaitForCapabilitiesMillis,
                         long conflictingVersionTimeoutMillis) {
         this.configNetconfConnector = configNetconfConnector;
         this.maxWaitForCapabilitiesMillis = maxWaitForCapabilitiesMillis;
         this.conflictingVersionTimeoutMillis = conflictingVersionTimeoutMillis;
     }
 
-    public synchronized LinkedHashMap<ConfigSnapshotHolder, EditAndCommitResponse> pushConfigs(List<ConfigSnapshotHolder> configs) throws NetconfDocumentedException {
+    public void process(List<AutoCloseable> autoCloseables, MBeanServerConnection platformMBeanServer, Persister persisterAggregator) throws InterruptedException {
+        List<? extends ConfigSnapshotHolder> configs;
+        while(true) {
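+            // take() blocks until pushConfigs() enqueues the next batch of snapshots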
+            configs = queue.take();
+            try {
+                internalPushConfigs(configs);
+                ConfigPersisterNotificationHandler jmxNotificationHandler = new ConfigPersisterNotificationHandler(platformMBeanServer, persisterAggregator);
+                synchronized (autoCloseables) {
+                    autoCloseables.add(jmxNotificationHandler);
+                }
+                /*
+                 * We have completed initial configuration. At this point
+                 * it is a good idea to perform garbage collection to prune
+                 * any garbage we have accumulated during startup.
+                 */
+                logger.debug("Running post-initialization garbage collection...");
+                System.gc();
+                logger.debug("Post-initialization garbage collection completed.");
+                logger.debug("ConfigPusher has pushed configs {}, gc completed", configs);
+            }
+            catch (NetconfDocumentedException e) {
+                logger.error("Error pushing configs {}",configs);
+                throw new IllegalStateException(e);
+            }
+        }
+    }
+
+    public void pushConfigs(List<? extends ConfigSnapshotHolder> configs) throws InterruptedException {
+        logger.debug("Requested to push configs {}", configs);
+        this.queue.put(configs);
+    }
+
+    private LinkedHashMap<? extends ConfigSnapshotHolder, EditAndCommitResponse> internalPushConfigs(List<? extends ConfigSnapshotHolder> configs) throws NetconfDocumentedException {
         logger.debug("Last config snapshots to be pushed to netconf: {}", configs);
         LinkedHashMap<ConfigSnapshotHolder, EditAndCommitResponse> result = new LinkedHashMap<>();
         // start pushing snapshots:
@@ -278,7 +320,7 @@ public class ConfigPusher {
 
     private static NetconfMessage getCommitMessage() {
         String resource = "/netconfOp/commit.xml";
-        try (InputStream stream = ConfigPusher.class.getResourceAsStream(resource)) {
+        try (InputStream stream = ConfigPusherImpl.class.getResourceAsStream(resource)) {
             checkNotNull(stream, "Unable to load resource " + resource);
             return new NetconfMessage(XmlUtil.readXmlToDocument(stream));
         } catch (SAXException | IOException e) {
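For context, the refactoring above splits ConfigPusherImpl into a producer/consumer pair: pushConfigs() only enqueues snapshot batches, while a dedicated thread loops in process() and drains the queue. A minimal, self-contained sketch of that hand-off, with illustrative names rather than the real controller classes:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class QueueHandOffSketch {
        public static void main(String[] args) throws InterruptedException {
            // bounded queue, mirroring QUEUE_SIZE above
            final BlockingQueue<String> queue = new LinkedBlockingQueue<>(100);

            // consumer side, mirroring process(): block on take() and handle each batch
            Thread pusher = new Thread(new Runnable() {
                @Override
                public void run() {
                    try {
                        while (true) {
                            String batch = queue.take();
                            System.out.println("pushing " + batch);
                        }
                    } catch (InterruptedException e) {
                        // stop when interrupted
                        Thread.currentThread().interrupt();
                    }
                }
            }, "config-pusher");
            pusher.start();

            // producer side, mirroring pushConfigs(): blocks only when the queue is full
            queue.put("snapshot-batch-1");
        }
    }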
index 48ae0cb91a132d1cd7bff773df604e2d5ae1c47a..0a48e6c67dc8cc290c2b81a1b0eb67c05ac7c618 100644 (file)
@@ -8,13 +8,18 @@
 
 package org.opendaylight.controller.netconf.persist.impl.osgi;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import javax.management.MBeanServer;
+
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
 import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
-import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
 import org.opendaylight.controller.netconf.mapping.api.NetconfOperationProvider;
 import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
-import org.opendaylight.controller.netconf.persist.impl.ConfigPersisterNotificationHandler;
-import org.opendaylight.controller.netconf.persist.impl.ConfigPusher;
+import org.opendaylight.controller.netconf.persist.impl.ConfigPusherImpl;
 import org.opendaylight.controller.netconf.persist.impl.PersisterAggregator;
 import org.opendaylight.controller.netconf.util.CloseableUtil;
 import org.osgi.framework.BundleActivator;
@@ -23,16 +28,13 @@ import org.osgi.framework.Constants;
 import org.osgi.framework.Filter;
 import org.osgi.framework.InvalidSyntaxException;
 import org.osgi.framework.ServiceReference;
+import org.osgi.framework.ServiceRegistration;
 import org.osgi.util.tracker.ServiceTracker;
 import org.osgi.util.tracker.ServiceTrackerCustomizer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.management.MBeanServer;
-import java.lang.management.ManagementFactory;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
+import com.google.common.annotations.VisibleForTesting;
 
 public class ConfigPersisterActivator implements BundleActivator {
 
@@ -49,11 +51,15 @@ public class ConfigPersisterActivator implements BundleActivator {
     public static final String STORAGE_ADAPTER_CLASS_PROP_SUFFIX = "storageAdapterClass";
 
     private List<AutoCloseable> autoCloseables;
+    private volatile BundleContext context;
 
+    ServiceRegistration<?> registration;
 
     @Override
     public void start(final BundleContext context) throws Exception {
         logger.debug("ConfigPersister starting");
+        this.context = context;
+
         autoCloseables = new ArrayList<>();
         PropertiesProviderBaseImpl propertiesProvider = new PropertiesProviderBaseImpl(context);
 
@@ -81,8 +87,14 @@ public class ConfigPersisterActivator implements BundleActivator {
     }
 
     @Override
-    public synchronized void stop(BundleContext context) throws Exception {
-        CloseableUtil.closeAll(autoCloseables);
+    public void stop(BundleContext context) throws Exception {
+        synchronized(autoCloseables) {
+            CloseableUtil.closeAll(autoCloseables);
+            if (registration != null) {
+                registration.unregister();
+            }
+            this.context = null;
+        }
     }
 
 
@@ -147,35 +159,29 @@ public class ConfigPersisterActivator implements BundleActivator {
             logger.trace("Got InnerCustomizer.addingService {}", reference);
             NetconfOperationServiceFactory service = reference.getBundle().getBundleContext().getService(reference);
 
-            final ConfigPusher configPusher = new ConfigPusher(service, maxWaitForCapabilitiesMillis, conflictingVersionTimeoutMillis);
+            logger.debug("Creating new job queue");
+
+            final ConfigPusherImpl configPusher = new ConfigPusherImpl(service, maxWaitForCapabilitiesMillis, conflictingVersionTimeoutMillis);
             logger.debug("Configuration Persister got {}", service);
+            logger.debug("Context was {}", context);
+            logger.debug("Registration was {}", registration);
+
             final Thread pushingThread = new Thread(new Runnable() {
                 @Override
                 public void run() {
                     try {
-                        configPusher.pushConfigs(configs);
-                    } catch (NetconfDocumentedException e) {
-                        logger.error("Error pushing configs {}",configs);
-                        throw new IllegalStateException(e);
+                        if(configs != null && !configs.isEmpty()) {
+                            configPusher.pushConfigs(configs);
+                        }
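+                        // publish the pusher as an OSGi service so other bundles can submit further configs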
+                        registration = context.registerService(ConfigPusher.class.getName(), configPusher, null);
+                        configPusher.process(autoCloseables, platformMBeanServer, persisterAggregator);
+                    } catch (InterruptedException e) {
+                        logger.info("ConfigPusher thread stopped",e);
                     }
                     logger.info("Configuration Persister initialization completed.");
-
-                    /*
-                     * We have completed initial configuration. At this point
-                     * it is good idea to perform garbage collection to prune
-                     * any garbage we have accumulated during startup.
-                     */
-                    logger.debug("Running post-initialization garbage collection...");
-                    System.gc();
-                    logger.debug("Post-initialization garbage collection completed.");
-
-                    ConfigPersisterNotificationHandler jmxNotificationHandler = new ConfigPersisterNotificationHandler(platformMBeanServer, persisterAggregator);
-                    synchronized (ConfigPersisterActivator.this) {
-                        autoCloseables.add(jmxNotificationHandler);
-                    }
                 }
             }, "config-pusher");
-            synchronized (ConfigPersisterActivator.this) {
+            synchronized (autoCloseables) {
                 autoCloseables.add(new AutoCloseable() {
                     @Override
                     public void close() {
index 95fd5f65498e507c54e6d88023dc24b25475031a..3e5249468da703051ed8fe436843d37c316ab7c5 100644 (file)
@@ -7,10 +7,23 @@
  */
 package org.opendaylight.controller.netconf.persist.impl.osgi;
 
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Dictionary;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
 import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
+import org.opendaylight.controller.config.persist.api.ConfigPusher;
 import org.opendaylight.controller.config.persist.api.ConfigSnapshotHolder;
 import org.opendaylight.controller.config.persist.api.Persister;
 import org.opendaylight.controller.config.persist.api.PropertiesProvider;
@@ -23,18 +36,10 @@ import org.osgi.framework.BundleContext;
 import org.osgi.framework.Filter;
 import org.osgi.framework.ServiceListener;
 import org.osgi.framework.ServiceReference;
+import org.osgi.framework.ServiceRegistration;
 
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 
 final class MockedBundleContext {
     @Mock
@@ -49,6 +54,8 @@ final class MockedBundleContext {
     NetconfOperationServiceFactory serviceFactory;
     @Mock
     private NetconfOperationService service;
+    @Mock
+    private ServiceRegistration<?> registration;
 
     MockedBundleContext(long maxWaitForCapabilitiesMillis, long conflictingVersionTimeoutMillis) throws Exception {
         MockitoAnnotations.initMocks(this);
@@ -77,6 +84,11 @@ final class MockedBundleContext {
         doReturn(Collections.emptySet()).when(service).getCapabilities();
         doNothing().when(service).close();
         doReturn("serviceFactoryMock").when(serviceFactory).toString();
+
+        doNothing().when(registration).unregister();
+        doReturn(registration).when(context).registerService(
+                eq(ConfigPusher.class.getName()), any(Closeable.class),
+                any(Dictionary.class));
     }
 
     public BundleContext getBundleContext() {
index bd092bc5bd6fb933edf165ad7d9757f056a2c69f..d5c9dc6fc79f627c4dccac89858ca694313b2fcc 100644 (file)
@@ -17,7 +17,7 @@ import org.opendaylight.controller.sal.connect.api.RemoteDeviceHandler;
 import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionCapabilities;
 import org.opendaylight.controller.sal.core.api.RpcImplementation;
 import org.opendaylight.yangtools.yang.data.api.CompositeNode;
-import org.opendaylight.yangtools.yang.model.api.SchemaContextProvider;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 
 /**
  * Implementation of RemoteDeviceHandler. Integrates cli with
@@ -41,7 +41,7 @@ public class NetconfDeviceConnectionHandler implements RemoteDeviceHandler<Netco
     }
 
     @Override
-    public synchronized void onDeviceConnected(final SchemaContextProvider contextProvider,
+    public synchronized void onDeviceConnected(final SchemaContext context,
             final NetconfSessionCapabilities capabilities, final RpcImplementation rpcImplementation) {
         console.enterRootContext(new ConsoleContext() {
 
@@ -60,8 +60,8 @@ public class NetconfDeviceConnectionHandler implements RemoteDeviceHandler<Netco
         // possible
         // TODO detect netconf base version
         // TODO detect inet types version
-        commandDispatcher.addRemoteCommands(rpcImplementation, contextProvider.getSchemaContext());
-        schemaContextRegistry.setRemoteSchemaContext(contextProvider.getSchemaContext());
+        commandDispatcher.addRemoteCommands(rpcImplementation, context);
+        schemaContextRegistry.setRemoteSchemaContext(context);
         up = true;
         this.notify();
     }
index c72705d50ebd8f505feaac092331fcbafc1ee41b..e55ec697ba560b528353fc8581c406ff58ea4ffd 100644 (file)
@@ -19,7 +19,8 @@
 
   <modules>
     <module>netconf-api</module>
-    <module>netconf-cli</module>
+    <!--FIXME make compilable-->
+    <!--<module>netconf-cli</module>-->
     <module>netconf-config</module>
     <module>netconf-impl</module>
     <module>config-netconf-connector</module>
index 45070ca8b01e64331381f67f28b367e2f80bc483..2723a3c205b6b02da2c2e6a4a460a046fda51929 100644 (file)
@@ -16,6 +16,11 @@ import org.opendaylight.controller.networkconfig.neutron.INeutronFirewallCRUD;
 import org.opendaylight.controller.networkconfig.neutron.INeutronFirewallPolicyCRUD;
 import org.opendaylight.controller.networkconfig.neutron.INeutronFirewallRuleCRUD;
 import org.opendaylight.controller.networkconfig.neutron.INeutronFloatingIPCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerHealthMonitorCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerListenerCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
 import org.opendaylight.controller.networkconfig.neutron.INeutronNetworkCRUD;
 import org.opendaylight.controller.networkconfig.neutron.INeutronPortCRUD;
 import org.opendaylight.controller.networkconfig.neutron.INeutronRouterCRUD;
@@ -74,7 +79,12 @@ public class Activator extends ComponentActivatorAbstractBase {
                 NeutronSecurityRuleInterface.class,
                 NeutronFirewallInterface.class,
                 NeutronFirewallPolicyInterface.class,
-                NeutronFirewallRuleInterface.class};
+                NeutronFirewallRuleInterface.class,
+                NeutronLoadBalancerInterface.class,
+                NeutronLoadBalancerPoolInterface.class,
+                NeutronLoadBalancerListenerInterface.class,
+                NeutronLoadBalancerHealthMonitorInterface.class,
+                NeutronLoadBalancerPoolMemberInterface.class};
         return res;
     }
 
@@ -253,5 +263,85 @@ public class Activator extends ComponentActivatorAbstractBase {
                     "setConfigurationContainerService",
                     "unsetConfigurationContainerService").setRequired(true));
         }
+        if (imp.equals(NeutronLoadBalancerInterface.class)) {
+            // export the service
+            c.setInterface(
+                    new String[] { INeutronLoadBalancerCRUD.class.getName(),
+                            IConfigurationContainerAware.class.getName()}, null);
+            Dictionary<String, String> props = new Hashtable<String, String>();
+            props.put("salListenerName", "neutron");
+            c.add(createContainerServiceDependency(containerName)
+                    .setService(IClusterContainerServices.class)
+                    .setCallbacks("setClusterContainerService",
+                            "unsetClusterContainerService").setRequired(true));
+            c.add(createContainerServiceDependency(containerName).setService(
+                    IConfigurationContainerService.class).setCallbacks(
+                    "setConfigurationContainerService",
+                    "unsetConfigurationContainerService").setRequired(true));
+        }
+        if (imp.equals(NeutronLoadBalancerListenerInterface.class)) {
+            // export the service
+            c.setInterface(
+                    new String[] { INeutronLoadBalancerListenerCRUD.class.getName(),
+                            IConfigurationContainerAware.class.getName()}, null);
+            Dictionary<String, String> props = new Hashtable<String, String>();
+            props.put("salListenerName", "neutron");
+            c.add(createContainerServiceDependency(containerName)
+                    .setService(IClusterContainerServices.class)
+                    .setCallbacks("setClusterContainerService",
+                            "unsetClusterContainerService").setRequired(true));
+            c.add(createContainerServiceDependency(containerName).setService(
+                    IConfigurationContainerService.class).setCallbacks(
+                    "setConfigurationContainerService",
+                    "unsetConfigurationContainerService").setRequired(true));
+        }
+        if (imp.equals(NeutronLoadBalancerPoolInterface.class)) {
+            // export the service
+            c.setInterface(
+                    new String[] { INeutronLoadBalancerPoolCRUD.class.getName(),
+                            IConfigurationContainerAware.class.getName()}, null);
+            Dictionary<String, String> props = new Hashtable<String, String>();
+            props.put("salListenerName", "neutron");
+            c.add(createContainerServiceDependency(containerName)
+                    .setService(IClusterContainerServices.class)
+                    .setCallbacks("setClusterContainerService",
+                            "unsetClusterContainerService").setRequired(true));
+            c.add(createContainerServiceDependency(containerName).setService(
+                    IConfigurationContainerService.class).setCallbacks(
+                    "setConfigurationContainerService",
+                    "unsetConfigurationContainerService").setRequired(true));
+        }
+        if (imp.equals(NeutronLoadBalancerHealthMonitorInterface.class)) {
+            // export the service
+            c.setInterface(
+                    new String[] { INeutronLoadBalancerHealthMonitorCRUD.class.getName(),
+                            IConfigurationContainerAware.class.getName()}, null);
+            Dictionary<String, String> props = new Hashtable<String, String>();
+            props.put("salListenerName", "neutron");
+            c.add(createContainerServiceDependency(containerName)
+                    .setService(IClusterContainerServices.class)
+                    .setCallbacks("setClusterContainerService",
+                            "unsetClusterContainerService").setRequired(true));
+            c.add(createContainerServiceDependency(containerName).setService(
+                    IConfigurationContainerService.class).setCallbacks(
+                    "setConfigurationContainerService",
+                    "unsetConfigurationContainerService").setRequired(true));
+        }
+        if (imp.equals(NeutronLoadBalancerPoolMemberInterface.class)) {
+            // export the service
+            c.setInterface(
+                    new String[] { INeutronLoadBalancerPoolMemberCRUD.class.getName(),
+                            IConfigurationContainerAware.class.getName()}, null);
+            Dictionary<String, String> props = new Hashtable<String, String>();
+            props.put("salListenerName", "neutron");
+            c.add(createContainerServiceDependency(containerName)
+                    .setService(IClusterContainerServices.class)
+                    .setCallbacks("setClusterContainerService",
+                            "unsetClusterContainerService").setRequired(true));
+            c.add(createContainerServiceDependency(containerName).setService(
+                    IConfigurationContainerService.class).setCallbacks(
+                    "setConfigurationContainerService",
+                    "unsetConfigurationContainerService").setRequired(true));
+        }
     }
 }
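The five registration blocks added above differ only in the CRUD interface they export. A sketch of how the shared wiring could be expressed once, reusing only calls already present in this Activator (the helper name is an illustration, not part of the patch):

    private void configureNeutronLoadBalancerService(Component c, String containerName, Class<?> crudInterface) {
        // export the CRUD service together with the configuration-aware interface
        c.setInterface(new String[] { crudInterface.getName(),
                IConfigurationContainerAware.class.getName() }, null);
        c.add(createContainerServiceDependency(containerName)
                .setService(IClusterContainerServices.class)
                .setCallbacks("setClusterContainerService",
                        "unsetClusterContainerService").setRequired(true));
        c.add(createContainerServiceDependency(containerName)
                .setService(IConfigurationContainerService.class)
                .setCallbacks("setConfigurationContainerService",
                        "unsetConfigurationContainerService").setRequired(true));
    }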
diff --git a/opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerHealthMonitorInterface.java b/opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerHealthMonitorInterface.java
new file mode 100644 (file)
index 0000000..b0beddf
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.implementation;
+
+import org.apache.felix.dm.Component;
+import org.opendaylight.controller.clustering.services.CacheConfigException;
+import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.clustering.services.IClusterServices;
+import org.opendaylight.controller.configuration.ConfigurationObject;
+import org.opendaylight.controller.configuration.IConfigurationContainerAware;
+import org.opendaylight.controller.configuration.IConfigurationContainerService;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerHealthMonitorCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerHealthMonitor;
+import org.opendaylight.controller.sal.utils.IObjectReader;
+import org.opendaylight.controller.sal.utils.Status;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+public class NeutronLoadBalancerHealthMonitorInterface implements INeutronLoadBalancerHealthMonitorCRUD, IConfigurationContainerAware,
+        IObjectReader {
+    private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancerHealthMonitorInterface.class);
+    private static final String FILE_NAME = "neutron.loadBalancerHealthMonitor.conf";
+    private String containerName = null;
+
+    private IClusterContainerServices clusterContainerService = null;
+    private IConfigurationContainerService configurationService;
+    private ConcurrentMap<String, NeutronLoadBalancerHealthMonitor> loadBalancerHealthMonitorDB;
+
+    // methods needed for creating caches
+    void setClusterContainerService(IClusterContainerServices s) {
+        logger.debug("Cluster Service set");
+        clusterContainerService = s;
+    }
+
+    void unsetClusterContainerService(IClusterContainerServices s) {
+        if (clusterContainerService == s) {
+            logger.debug("Cluster Service removed!");
+            clusterContainerService = null;
+        }
+    }
+
+    public void setConfigurationContainerService(IConfigurationContainerService service) {
+        logger.trace("Configuration service set: {}", service);
+        configurationService = service;
+    }
+
+    public void unsetConfigurationContainerService(IConfigurationContainerService service) {
+        logger.trace("Configuration service removed: {}", service);
+        configurationService = null;
+    }
+
+    private void allocateCache() {
+        if (this.clusterContainerService == null) {
+            logger.error("un-initialized clusterContainerService, can't create cache");
+            return;
+        }
+        logger.debug("Creating Cache for Neutron LoadBalancerHealthMonitor");
+        try {
+            // neutron caches
+            this.clusterContainerService.createCache("neutronLoadBalancerHealthMonitors",
+                    EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+        } catch (CacheConfigException cce) {
+            logger.error("Cache couldn't be created for Neutron LoadBalancerHealthMonitor -  check cache mode");
+        } catch (CacheExistException cce) {
+            logger.error("Cache for Neutron LoadBalancerHealthMonitor already exists, destroy and recreate");
+        }
+        logger.debug("Cache successfully created for Neutron LoadBalancerHealthMonitor");
+    }
+
+    @SuppressWarnings ({"unchecked"})
+    private void retrieveCache() {
+        if (clusterContainerService == null) {
+            logger.error("un-initialized clusterContainerService, can't retrieve cache");
+            return;
+        }
+
+        logger.debug("Retrieving cache for Neutron LoadBalancerHealthMonitor");
+        loadBalancerHealthMonitorDB = (ConcurrentMap<String, NeutronLoadBalancerHealthMonitor>) clusterContainerService
+                .getCache("neutronLoadBalancerHealthMonitors");
+        if (loadBalancerHealthMonitorDB == null) {
+            logger.error("Cache couldn't be retrieved for Neutron LoadBalancerHealthMonitor");
+        }
+        logger.debug("Cache was successfully retrieved for Neutron LoadBalancerHealthMonitor");
+    }
+
+    private void destroyCache() {
+        if (clusterContainerService == null) {
+            logger.error("un-initialized clusterMger, can't destroy cache");
+            return;
+        }
+        logger.debug("Destroying Cache for LoadBalancerHealthMonitor");
+        clusterContainerService.destroyCache("neutronLoadBalancerHealthMonitors");
+    }
+
+    private void startUp() {
+        allocateCache();
+        retrieveCache();
+        loadConfiguration();
+    }
+
+    /**
+     * Function called by the dependency manager when all the required
+     * dependencies are satisfied
+     */
+    void init(Component c) {
+        Dictionary<?, ?> props = c.getServiceProperties();
+        if (props != null) {
+            this.containerName = (String) props.get("containerName");
+            logger.debug("Running containerName: {}", this.containerName);
+        } else {
+            // In the Global instance case the containerName is empty
+            this.containerName = "";
+        }
+        startUp();
+    }
+
+    /**
+     * Function called by the dependency manager when at least one dependency
+     * becomes unsatisfied or when the component is shutting down because,
+     * for example, the bundle is being stopped.
+     */
+    void destroy() {
+        destroyCache();
+    }
+
+    /**
+     * Function called by the dependency manager after "init ()" is called and after
+     * the services provided by the class are registered in the service registry
+     */
+    void start() {
+    }
+
+    /**
+     * Function called by the dependency manager before the services exported by
+     * the component are unregistered; this will be followed by a "destroy ()"
+     * call
+     */
+    void stop() {
+    }
+
+    // this method uses reflection to update an object from its delta.
+
+    private boolean overwrite(Object target, Object delta) {
+        Method[] methods = target.getClass().getMethods();
+
+        for (Method toMethod : methods) {
+            if (toMethod.getDeclaringClass().equals(target.getClass())
+                    && toMethod.getName().startsWith("set")) {
+
+                String toName = toMethod.getName();
+                String fromName = toName.replace("set", "get");
+
+                try {
+                    Method fromMethod = delta.getClass().getMethod(fromName);
+                    Object value = fromMethod.invoke(delta, (Object[]) null);
+                    if (value != null) {
+                        toMethod.invoke(target, value);
+                    }
+                } catch (Exception e) {
+                    e.printStackTrace();
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public boolean neutronLoadBalancerHealthMonitorExists(String uuid) {
+        return loadBalancerHealthMonitorDB.containsKey(uuid);
+    }
+
+    @Override
+    public NeutronLoadBalancerHealthMonitor getNeutronLoadBalancerHealthMonitor(String uuid) {
+        if (!neutronLoadBalancerHealthMonitorExists(uuid)) {
+            logger.debug("No LoadBalancerHealthMonitor has Been Defined");
+            return null;
+        }
+        return loadBalancerHealthMonitorDB.get(uuid);
+    }
+
+    @Override
+    public List<NeutronLoadBalancerHealthMonitor> getAllNeutronLoadBalancerHealthMonitors() {
+        Set<NeutronLoadBalancerHealthMonitor> allLoadBalancerHealthMonitors = new HashSet<NeutronLoadBalancerHealthMonitor>();
+        for (Entry<String, NeutronLoadBalancerHealthMonitor> entry : loadBalancerHealthMonitorDB.entrySet()) {
+            NeutronLoadBalancerHealthMonitor loadBalancerHealthMonitor = entry.getValue();
+            allLoadBalancerHealthMonitors.add(loadBalancerHealthMonitor);
+        }
+        logger.debug("Exiting getLoadBalancerHealthMonitors, Found {} OpenStackLoadBalancerHealthMonitor", allLoadBalancerHealthMonitors.size());
+        List<NeutronLoadBalancerHealthMonitor> ans = new ArrayList<NeutronLoadBalancerHealthMonitor>();
+        ans.addAll(allLoadBalancerHealthMonitors);
+        return ans;
+    }
+
+    @Override
+    public boolean addNeutronLoadBalancerHealthMonitor(NeutronLoadBalancerHealthMonitor input) {
+        if (neutronLoadBalancerHealthMonitorExists(input.getLoadBalancerHealthMonitorID())) {
+            return false;
+        }
+        loadBalancerHealthMonitorDB.putIfAbsent(input.getLoadBalancerHealthMonitorID(), input);
+        //TODO: add code to find INeutronLoadBalancerHealthMonitorAware services and call networkCreated on them
+        return true;
+    }
+
+    @Override
+    public boolean removeNeutronLoadBalancerHealthMonitor(String uuid) {
+        if (!neutronLoadBalancerHealthMonitorExists(uuid)) {
+            return false;
+        }
+        loadBalancerHealthMonitorDB.remove(uuid);
+        //TODO: add code to find INeutronLoadBalancerHealthMonitorAware services and call networkDeleted on them
+        return true;
+    }
+
+    @Override
+    public boolean updateNeutronLoadBalancerHealthMonitor(String uuid, NeutronLoadBalancerHealthMonitor delta) {
+        if (!neutronLoadBalancerHealthMonitorExists(uuid)) {
+            return false;
+        }
+        NeutronLoadBalancerHealthMonitor target = loadBalancerHealthMonitorDB.get(uuid);
+        return overwrite(target, delta);
+    }
+
+    @Override
+    public boolean neutronLoadBalancerHealthMonitorInUse(String loadBalancerHealthMonitorUUID) {
+        return !neutronLoadBalancerHealthMonitorExists(loadBalancerHealthMonitorUUID);
+    }
+
+    private void loadConfiguration() {
+        for (ConfigurationObject conf : configurationService.retrieveConfiguration(this, FILE_NAME)) {
+            NeutronLoadBalancerHealthMonitor nn = (NeutronLoadBalancerHealthMonitor) conf;
+            loadBalancerHealthMonitorDB.put(nn.getLoadBalancerHealthMonitorID(), nn);
+        }
+    }
+
+    @Override
+    public Status saveConfiguration() {
+        return configurationService.persistConfiguration(new ArrayList<ConfigurationObject>(loadBalancerHealthMonitorDB.values()),
+                FILE_NAME);
+    }
+
+    @Override
+    public Object readObject(ObjectInputStream ois) throws FileNotFoundException, IOException, ClassNotFoundException {
+        return ois.readObject();
+    }
+}
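For orientation, each of these new Neutron CRUD interfaces applies updates through the reflection-based overwrite() shown above: every setter on the target whose matching getter on the delta returns a non-null value is invoked, so a partial update only touches the supplied fields. A standalone sketch of the same loop, using a hypothetical Bean class for illustration:

    import java.lang.reflect.Method;

    public class OverwriteSketch {

        public static class Bean {
            private String name;
            private Integer weight;
            public String getName() { return name; }
            public void setName(String name) { this.name = name; }
            public Integer getWeight() { return weight; }
            public void setWeight(Integer weight) { this.weight = weight; }
        }

        // same copy loop as overwrite(): invoke each setter on the target with the
        // delta's matching getter value, but only when that value is non-null
        static void overwrite(Object target, Object delta) throws Exception {
            for (Method toMethod : target.getClass().getMethods()) {
                if (toMethod.getDeclaringClass().equals(target.getClass())
                        && toMethod.getName().startsWith("set")) {
                    Method fromMethod = delta.getClass().getMethod(toMethod.getName().replace("set", "get"));
                    Object value = fromMethod.invoke(delta);
                    if (value != null) {
                        toMethod.invoke(target, value);
                    }
                }
            }
        }

        public static void main(String[] args) throws Exception {
            Bean target = new Bean();
            target.setName("lb-old");
            target.setWeight(1);

            Bean delta = new Bean();
            delta.setWeight(5);           // only weight is supplied in the delta

            overwrite(target, delta);
            System.out.println(target.getName() + " " + target.getWeight()); // prints: lb-old 5
        }
    }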
diff --git a/opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerInterface.java b/opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerInterface.java
new file mode 100644 (file)
index 0000000..aa7280b
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.implementation;
+
+import org.apache.felix.dm.Component;
+import org.opendaylight.controller.clustering.services.CacheConfigException;
+import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.clustering.services.IClusterServices;
+import org.opendaylight.controller.configuration.ConfigurationObject;
+import org.opendaylight.controller.configuration.IConfigurationContainerAware;
+import org.opendaylight.controller.configuration.IConfigurationContainerService;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancer;
+import org.opendaylight.controller.sal.utils.IObjectReader;
+import org.opendaylight.controller.sal.utils.Status;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+public class NeutronLoadBalancerInterface implements INeutronLoadBalancerCRUD, IConfigurationContainerAware,
+        IObjectReader {
+    private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancerInterface.class);
+    private static final String FILE_NAME = "neutron.loadBalancer.conf";
+    private String containerName = null;
+
+    private IClusterContainerServices clusterContainerService = null;
+    private IConfigurationContainerService configurationService;
+    private ConcurrentMap<String, NeutronLoadBalancer> loadBalancerDB;
+
+    // methods needed for creating caches
+    void setClusterContainerService(IClusterContainerServices s) {
+        logger.debug("Cluster Service set");
+        clusterContainerService = s;
+    }
+
+    void unsetClusterContainerService(IClusterContainerServices s) {
+        if (clusterContainerService == s) {
+            logger.debug("Cluster Service removed!");
+            clusterContainerService = null;
+        }
+    }
+
+    public void setConfigurationContainerService(IConfigurationContainerService service) {
+        logger.trace("Configuration service set: {}", service);
+        configurationService = service;
+    }
+
+    public void unsetConfigurationContainerService(IConfigurationContainerService service) {
+        logger.trace("Configuration service removed: {}", service);
+        configurationService = null;
+    }
+
+    private void allocateCache() {
+        if (this.clusterContainerService == null) {
+            logger.error("un-initialized clusterContainerService, can't create cache");
+            return;
+        }
+        logger.debug("Creating Cache for Neutron LoadBalancer");
+        try {
+            // neutron caches
+            this.clusterContainerService.createCache("neutronLoadBalancers",
+                    EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+        } catch (CacheConfigException cce) {
+            logger.error("Cache couldn't be created for Neutron LoadBalancer -  check cache mode");
+        } catch (CacheExistException cce) {
+            logger.error("Cache for Neutron LoadBalancer already exists, destroy and recreate");
+        }
+        logger.debug("Cache successfully created for Neutron LoadBalancer");
+    }
+
+    @SuppressWarnings ({"unchecked"})
+    private void retrieveCache() {
+        if (clusterContainerService == null) {
+            logger.error("un-initialized clusterContainerService, can't retrieve cache");
+            return;
+        }
+
+        logger.debug("Retrieving cache for Neutron LoadBalancer");
+        loadBalancerDB = (ConcurrentMap<String, NeutronLoadBalancer>) clusterContainerService
+                .getCache("neutronLoadBalancers");
+        if (loadBalancerDB == null) {
+            logger.error("Cache couldn't be retrieved for Neutron LoadBalancer");
+        }
+        logger.debug("Cache was successfully retrieved for Neutron LoadBalancer");
+    }
+
+    private void destroyCache() {
+        if (clusterContainerService == null) {
+            logger.error("un-initialized clusterMger, can't destroy cache");
+            return;
+        }
+        logger.debug("Destroying Cache for LoadBalancer");
+        clusterContainerService.destroyCache("neutronLoadBalancers");
+    }
+
+    private void startUp() {
+        allocateCache();
+        retrieveCache();
+        loadConfiguration();
+    }
+
+    /**
+     * Function called by the dependency manager when all the required
+     * dependencies are satisfied
+     */
+    void init(Component c) {
+        Dictionary<?, ?> props = c.getServiceProperties();
+        if (props != null) {
+            this.containerName = (String) props.get("containerName");
+            logger.debug("Running containerName: {}", this.containerName);
+        } else {
+            // In the Global instance case the containerName is empty
+            this.containerName = "";
+        }
+        startUp();
+    }
+
+    /**
+     * Function called by the dependency manager when at least one dependency
+     * becomes unsatisfied or when the component is shutting down because,
+     * for example, the bundle is being stopped.
+     */
+    void destroy() {
+        destroyCache();
+    }
+
+    /**
+     * Function called by the dependency manager after "init ()" is called and after
+     * the services provided by the class are registered in the service registry
+     */
+    void start() {
+    }
+
+    /**
+     * Function called by the dependency manager before the services exported by
+     * the component are unregistered; this will be followed by a "destroy ()"
+     * call
+     */
+    void stop() {
+    }
+
+    // this method uses reflection to update an object from its delta.
+
+    private boolean overwrite(Object target, Object delta) {
+        Method[] methods = target.getClass().getMethods();
+
+        for (Method toMethod : methods) {
+            if (toMethod.getDeclaringClass().equals(target.getClass())
+                    && toMethod.getName().startsWith("set")) {
+
+                String toName = toMethod.getName();
+                String fromName = toName.replace("set", "get");
+
+                try {
+                    Method fromMethod = delta.getClass().getMethod(fromName);
+                    Object value = fromMethod.invoke(delta, (Object[]) null);
+                    if (value != null) {
+                        toMethod.invoke(target, value);
+                    }
+                } catch (Exception e) {
+                    e.printStackTrace();
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public boolean neutronLoadBalancerExists(String uuid) {
+        return loadBalancerDB.containsKey(uuid);
+    }
+
+    @Override
+    public NeutronLoadBalancer getNeutronLoadBalancer(String uuid) {
+        if (!neutronLoadBalancerExists(uuid)) {
+            logger.debug("No LoadBalancer Have Been Defined");
+            return null;
+        }
+        return loadBalancerDB.get(uuid);
+    }
+
+    @Override
+    public List<NeutronLoadBalancer> getAllNeutronLoadBalancers() {
+        Set<NeutronLoadBalancer> allLoadBalancers = new HashSet<NeutronLoadBalancer>();
+        for (Entry<String, NeutronLoadBalancer> entry : loadBalancerDB.entrySet()) {
+            NeutronLoadBalancer loadBalancer = entry.getValue();
+            allLoadBalancers.add(loadBalancer);
+        }
+        logger.debug("Exiting getLoadBalancers, Found {} OpenStackLoadBalancer", allLoadBalancers.size());
+        List<NeutronLoadBalancer> ans = new ArrayList<NeutronLoadBalancer>();
+        ans.addAll(allLoadBalancers);
+        return ans;
+    }
+
+    @Override
+    public boolean addNeutronLoadBalancer(NeutronLoadBalancer input) {
+        if (neutronLoadBalancerExists(input.getLoadBalancerID())) {
+            return false;
+        }
+        loadBalancerDB.putIfAbsent(input.getLoadBalancerID(), input);
+        //TODO: add code to find INeutronLoadBalancerAware services and call networkCreated on them
+        return true;
+    }
+
+    @Override
+    public boolean removeNeutronLoadBalancer(String uuid) {
+        if (!neutronLoadBalancerExists(uuid)) {
+            return false;
+        }
+        loadBalancerDB.remove(uuid);
+        //TODO: add code to find INeutronLoadBalancerAware services and call networkDeleted on them
+        return true;
+    }
+
+    @Override
+    public boolean updateNeutronLoadBalancer(String uuid, NeutronLoadBalancer delta) {
+        if (!neutronLoadBalancerExists(uuid)) {
+            return false;
+        }
+        NeutronLoadBalancer target = loadBalancerDB.get(uuid);
+        return overwrite(target, delta);
+    }
+
+    @Override
+    public boolean neutronLoadBalancerInUse(String loadBalancerUUID) {
+        return !neutronLoadBalancerExists(loadBalancerUUID);
+    }
+
+    private void loadConfiguration() {
+        for (ConfigurationObject conf : configurationService.retrieveConfiguration(this, FILE_NAME)) {
+            NeutronLoadBalancer nn = (NeutronLoadBalancer) conf;
+            loadBalancerDB.put(nn.getLoadBalancerID(), nn);
+        }
+    }
+
+    @Override
+    public Status saveConfiguration() {
+        return configurationService.persistConfiguration(new ArrayList<ConfigurationObject>(loadBalancerDB.values()),
+                FILE_NAME);
+    }
+
+    @Override
+    public Object readObject(ObjectInputStream ois) throws FileNotFoundException, IOException, ClassNotFoundException {
+        return ois.readObject();
+    }
+}
diff --git a/opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerListenerInterface.java b/opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerListenerInterface.java
new file mode 100644 (file)
index 0000000..3779863
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.implementation;
+
+import org.apache.felix.dm.Component;
+import org.opendaylight.controller.clustering.services.CacheConfigException;
+import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.clustering.services.IClusterServices;
+import org.opendaylight.controller.configuration.ConfigurationObject;
+import org.opendaylight.controller.configuration.IConfigurationContainerAware;
+import org.opendaylight.controller.configuration.IConfigurationContainerService;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerListenerCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerListener;
+import org.opendaylight.controller.sal.utils.IObjectReader;
+import org.opendaylight.controller.sal.utils.Status;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+public class NeutronLoadBalancerListenerInterface implements INeutronLoadBalancerListenerCRUD, IConfigurationContainerAware,
+        IObjectReader {
+    private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancerListenerInterface.class);
+    private static final String FILE_NAME = "neutron.loadBalancerListener.conf";
+    private String containerName = null;
+
+    private IClusterContainerServices clusterContainerService = null;
+    private IConfigurationContainerService configurationService;
+    private ConcurrentMap<String, NeutronLoadBalancerListener> loadBalancerListenerDB;
+
+    // methods needed for creating caches
+    void setClusterContainerService(IClusterContainerServices s) {
+        logger.debug("Cluster Service set");
+        clusterContainerService = s;
+    }
+
+    void unsetClusterContainerService(IClusterContainerServices s) {
+        if (clusterContainerService == s) {
+            logger.debug("Cluster Service removed!");
+            clusterContainerService = null;
+        }
+    }
+
+    public void setConfigurationContainerService(IConfigurationContainerService service) {
+        logger.trace("Configuration service set: {}", service);
+        configurationService = service;
+    }
+
+    public void unsetConfigurationContainerService(IConfigurationContainerService service) {
+        logger.trace("Configuration service removed: {}", service);
+        configurationService = null;
+    }
+
+    private void allocateCache() {
+        if (this.clusterContainerService == null) {
+            logger.error("un-initialized clusterContainerService, can't create cache");
+            return;
+        }
+        logger.debug("Creating Cache for Neutron LoadBalancerListener");
+        try {
+            // neutron caches
+            this.clusterContainerService.createCache("neutronLoadBalancerListeners",
+                    EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+        } catch (CacheConfigException cce) {
+            logger.error("Cache couldn't be created for Neutron LoadBalancerListener -  check cache mode");
+        } catch (CacheExistException cce) {
+            logger.error("Cache for Neutron LoadBalancerListener already exists, destroy and recreate");
+        }
+        logger.debug("Cache successfully created for Neutron LoadBalancerListener");
+    }
+
+    @SuppressWarnings ({"unchecked"})
+    private void retrieveCache() {
+        if (clusterContainerService == null) {
+            logger.error("un-initialized clusterContainerService, can't retrieve cache");
+            return;
+        }
+
+        logger.debug("Retrieving cache for Neutron LoadBalancerListener");
+        loadBalancerListenerDB = (ConcurrentMap<String, NeutronLoadBalancerListener>) clusterContainerService
+                .getCache("neutronLoadBalancerListeners");
+        if (loadBalancerListenerDB == null) {
+            logger.error("Cache couldn't be retrieved for Neutron LoadBalancerListener");
+        }
+        logger.debug("Cache was successfully retrieved for Neutron LoadBalancerListener");
+    }
+
+    private void destroyCache() {
+        if (clusterContainerService == null) {
+            logger.error("un-initialized clusterMger, can't destroy cache");
+            return;
+        }
+        logger.debug("Destroying Cache for LoadBalancerListener");
+        clusterContainerService.destroyCache("neutronLoadBalancerListeners");
+    }
+
+    private void startUp() {
+        allocateCache();
+        retrieveCache();
+        loadConfiguration();
+    }
+
+    /**
+     * Function called by the dependency manager when all the required
+     * dependencies are satisfied
+     */
+    void init(Component c) {
+        Dictionary<?, ?> props = c.getServiceProperties();
+        if (props != null) {
+            this.containerName = (String) props.get("containerName");
+            logger.debug("Running containerName: {}", this.containerName);
+        } else {
+            // In the Global instance case the containerName is empty
+            this.containerName = "";
+        }
+        startUp();
+    }
+
+    /**
+     * Function called by the dependency manager when at least one dependency
+     * becomes unsatisfied or when the component is shutting down because,
+     * for example, the bundle is being stopped.
+     */
+    void destroy() {
+        destroyCache();
+    }
+
+    /**
+     * Function called by the dependency manager after "init ()" is called and after
+     * the services provided by the class are registered in the service registry
+     */
+    void start() {
+    }
+
+    /**
+     * Function called by the dependency manager before the services exported by
+     * the component are unregistered; this will be followed by a "destroy ()"
+     * call
+     */
+    void stop() {
+    }
+
+    // This method uses reflection to update an object from its delta.
+
+    private boolean overwrite(Object target, Object delta) {
+        Method[] methods = target.getClass().getMethods();
+
+        for (Method toMethod : methods) {
+            if (toMethod.getDeclaringClass().equals(target.getClass())
+                    && toMethod.getName().startsWith("set")) {
+
+                String toName = toMethod.getName();
+                String fromName = toName.replace("set", "get");
+
+                try {
+                    Method fromMethod = delta.getClass().getMethod(fromName);
+                    Object value = fromMethod.invoke(delta, (Object[]) null);
+                    if (value != null) {
+                        toMethod.invoke(target, value);
+                    }
+                } catch (Exception e) {
+                    e.printStackTrace();
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public boolean neutronLoadBalancerListenerExists(String uuid) {
+        return loadBalancerListenerDB.containsKey(uuid);
+    }
+
+    @Override
+    public NeutronLoadBalancerListener getNeutronLoadBalancerListener(String uuid) {
+        if (!neutronLoadBalancerListenerExists(uuid)) {
+            logger.debug("No LoadBalancerListener Have Been Defined");
+            return null;
+        }
+        return loadBalancerListenerDB.get(uuid);
+    }
+
+    @Override
+    public List<NeutronLoadBalancerListener> getAllNeutronLoadBalancerListeners() {
+        Set<NeutronLoadBalancerListener> allLoadBalancerListeners = new HashSet<NeutronLoadBalancerListener>();
+        for (Entry<String, NeutronLoadBalancerListener> entry : loadBalancerListenerDB.entrySet()) {
+            NeutronLoadBalancerListener loadBalancerListener = entry.getValue();
+            allLoadBalancerListeners.add(loadBalancerListener);
+        }
+        logger.debug("Exiting getLoadBalancerListeners, Found {} OpenStackLoadBalancerListener", allLoadBalancerListeners.size());
+        List<NeutronLoadBalancerListener> ans = new ArrayList<NeutronLoadBalancerListener>();
+        ans.addAll(allLoadBalancerListeners);
+        return ans;
+    }
+
+    @Override
+    public boolean addNeutronLoadBalancerListener(NeutronLoadBalancerListener input) {
+        if (neutronLoadBalancerListenerExists(input.getLoadBalancerListenerID())) {
+            return false;
+        }
+        loadBalancerListenerDB.putIfAbsent(input.getLoadBalancerListenerID(), input);
+        //TODO: add code to find INeutronLoadBalancerListenerAware services and call neutronLoadBalancerListenerCreated on them
+        return true;
+    }
+
+    @Override
+    public boolean removeNeutronLoadBalancerListener(String uuid) {
+        if (!neutronLoadBalancerListenerExists(uuid)) {
+            return false;
+        }
+        loadBalancerListenerDB.remove(uuid);
+        //TODO: add code to find INeutronLoadBalancerListenerAware services and call neutronLoadBalancerListenerDeleted on them
+        return true;
+    }
+
+    @Override
+    public boolean updateNeutronLoadBalancerListener(String uuid, NeutronLoadBalancerListener delta) {
+        if (!neutronLoadBalancerListenerExists(uuid)) {
+            return false;
+        }
+        NeutronLoadBalancerListener target = loadBalancerListenerDB.get(uuid);
+        return overwrite(target, delta);
+    }
+
+    @Override
+    public boolean neutronLoadBalancerListenerInUse(String loadBalancerListenerUUID) {
+        return !neutronLoadBalancerListenerExists(loadBalancerListenerUUID);
+    }
+
+    private void loadConfiguration() {
+        for (ConfigurationObject conf : configurationService.retrieveConfiguration(this, FILE_NAME)) {
+            NeutronLoadBalancerListener nn = (NeutronLoadBalancerListener) conf;
+            loadBalancerListenerDB.put(nn.getLoadBalancerListenerID(), nn);
+        }
+    }
+
+    @Override
+    public Status saveConfiguration() {
+        return configurationService.persistConfiguration(new ArrayList<ConfigurationObject>(loadBalancerListenerDB.values()),
+                FILE_NAME);
+    }
+
+    @Override
+    public Object readObject(ObjectInputStream ois) throws FileNotFoundException, IOException, ClassNotFoundException {
+        return ois.readObject();
+    }
+}
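All three Neutron*Interface implementations in this change share the same reflection-based overwrite(target, delta) helper to apply PATCH-style updates: every public setter declared on the target class is paired with the like-named getter on the delta, and the value is copied only when the getter returns non-null. Below is a minimal, self-contained sketch of that merge behaviour; the OverwriteSketch/Example classes and the main() driver are illustrative only and are not part of this commit (the sketch also derives the getter name with substring(3) instead of replace(), which avoids accidentally rewriting a later "set" inside a method name).

    // Standalone illustration of the overwrite(target, delta) merge used above.
    import java.lang.reflect.Method;

    public class OverwriteSketch {
        // Hypothetical bean used only for this example.
        public static class Example {
            private String name;
            private Integer weight;
            public String getName() { return name; }
            public void setName(String name) { this.name = name; }
            public Integer getWeight() { return weight; }
            public void setWeight(Integer weight) { this.weight = weight; }
        }

        static boolean overwrite(Object target, Object delta) {
            for (Method toMethod : target.getClass().getMethods()) {
                if (toMethod.getDeclaringClass().equals(target.getClass())
                        && toMethod.getName().startsWith("set")) {
                    // "get" + suffix: same pairing as the commit, without the replace() pitfall.
                    String fromName = "get" + toMethod.getName().substring(3);
                    try {
                        Method fromMethod = delta.getClass().getMethod(fromName);
                        Object value = fromMethod.invoke(delta, (Object[]) null);
                        if (value != null) {
                            toMethod.invoke(target, value); // patch semantics: null means "no change"
                        }
                    } catch (Exception e) {
                        return false;
                    }
                }
            }
            return true;
        }

        public static void main(String[] args) {
            Example target = new Example();
            target.setName("pool-a");
            target.setWeight(1);

            Example delta = new Example();
            delta.setWeight(5); // only the weight is updated

            overwrite(target, delta);
            System.out.println(target.getName() + " " + target.getWeight()); // prints: pool-a 5
        }
    }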
diff --git a/opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerPoolInterface.java b/opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerPoolInterface.java
new file mode 100644 (file)
index 0000000..34cdba3
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.implementation;
+
+import org.apache.felix.dm.Component;
+import org.opendaylight.controller.clustering.services.CacheConfigException;
+import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.clustering.services.IClusterServices;
+import org.opendaylight.controller.configuration.ConfigurationObject;
+import org.opendaylight.controller.configuration.IConfigurationContainerAware;
+import org.opendaylight.controller.configuration.IConfigurationContainerService;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
+import org.opendaylight.controller.sal.utils.IObjectReader;
+import org.opendaylight.controller.sal.utils.Status;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+public class NeutronLoadBalancerPoolInterface implements INeutronLoadBalancerPoolCRUD, IConfigurationContainerAware,
+        IObjectReader {
+    private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancerPoolInterface.class);
+    private static final String FILE_NAME = "neutron.loadBalancerPool.conf";
+    private String containerName = null;
+
+    private IClusterContainerServices clusterContainerService = null;
+    private IConfigurationContainerService configurationService;
+    private ConcurrentMap<String, NeutronLoadBalancerPool> loadBalancerPoolDB;
+
+    // methods needed for creating caches
+    void setClusterContainerService(IClusterContainerServices s) {
+        logger.debug("Cluster Service set");
+        clusterContainerService = s;
+    }
+
+    void unsetClusterContainerService(IClusterContainerServices s) {
+        if (clusterContainerService == s) {
+            logger.debug("Cluster Service removed!");
+            clusterContainerService = null;
+        }
+    }
+
+    public void setConfigurationContainerService(IConfigurationContainerService service) {
+        logger.trace("Configuration service set: {}", service);
+        configurationService = service;
+    }
+
+    public void unsetConfigurationContainerService(IConfigurationContainerService service) {
+        logger.trace("Configuration service removed: {}", service);
+        configurationService = null;
+    }
+
+    private void allocateCache() {
+        if (this.clusterContainerService == null) {
+            logger.error("un-initialized clusterContainerService, can't create cache");
+            return;
+        }
+        logger.debug("Creating Cache for Neutron LoadBalancerPool");
+        try {
+            // neutron caches
+            this.clusterContainerService.createCache("neutronLoadBalancerPools",
+                    EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+        } catch (CacheConfigException cce) {
+            logger.error("Cache couldn't be created for Neutron LoadBalancerPool -  check cache mode");
+        } catch (CacheExistException cce) {
+            logger.error("Cache for Neutron LoadBalancerPool already exists, destroy and recreate");
+        }
+        logger.debug("Cache successfully created for Neutron LoadBalancerPool");
+    }
+
+    @SuppressWarnings ({"unchecked"})
+    private void retrieveCache() {
+        if (clusterContainerService == null) {
+            logger.error("un-initialized clusterContainerService, can't retrieve cache");
+            return;
+        }
+
+        logger.debug("Retrieving cache for Neutron LoadBalancerPool");
+        loadBalancerPoolDB = (ConcurrentMap<String, NeutronLoadBalancerPool>) clusterContainerService
+                .getCache("neutronLoadBalancerPools");
+        if (loadBalancerPoolDB == null) {
+            logger.error("Cache couldn't be retrieved for Neutron LoadBalancerPool");
+        }
+        logger.debug("Cache was successfully retrieved for Neutron LoadBalancerPool");
+    }
+
+    private void destroyCache() {
+        if (clusterContainerService == null) {
+            logger.error("un-initialized clusterMger, can't destroy cache");
+            return;
+        }
+        logger.debug("Destroying Cache for LoadBalancerPool");
+        clusterContainerService.destroyCache("neutronLoadBalancerPools");
+    }
+
+    private void startUp() {
+        allocateCache();
+        retrieveCache();
+        loadConfiguration();
+    }
+
+    /**
+     * Function called by the dependency manager when all the required
+     * dependencies are satisfied
+     */
+    void init(Component c) {
+        Dictionary<?, ?> props = c.getServiceProperties();
+        if (props != null) {
+            this.containerName = (String) props.get("containerName");
+            logger.debug("Running containerName: {}", this.containerName);
+        } else {
+            // In the Global instance case the containerName is empty
+            this.containerName = "";
+        }
+        startUp();
+    }
+
+    /**
+     * Function called by the dependency manager when at least one dependency
+     * becomes unsatisfied or when the component is shutting down, for example
+     * because the bundle is being stopped.
+     */
+    void destroy() {
+        destroyCache();
+    }
+
+    /**
+     * Function called by dependency manager after "init ()" is called and after
+     * the services provided by the class are registered in the service registry
+     */
+    void start() {
+    }
+
+    /**
+     * Function called by the dependency manager before the services exported by
+     * the component are unregistered; this will be followed by a "destroy ()"
+     * call
+     */
+    void stop() {
+    }
+
+    // This method uses reflection to update an object from its delta.
+
+    private boolean overwrite(Object target, Object delta) {
+        Method[] methods = target.getClass().getMethods();
+
+        for (Method toMethod : methods) {
+            if (toMethod.getDeclaringClass().equals(target.getClass())
+                    && toMethod.getName().startsWith("set")) {
+
+                String toName = toMethod.getName();
+                String fromName = toName.replace("set", "get");
+
+                try {
+                    Method fromMethod = delta.getClass().getMethod(fromName);
+                    Object value = fromMethod.invoke(delta, (Object[]) null);
+                    if (value != null) {
+                        toMethod.invoke(target, value);
+                    }
+                } catch (Exception e) {
+                    e.printStackTrace();
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public boolean neutronLoadBalancerPoolExists(String uuid) {
+        return loadBalancerPoolDB.containsKey(uuid);
+    }
+
+    @Override
+    public NeutronLoadBalancerPool getNeutronLoadBalancerPool(String uuid) {
+        if (!neutronLoadBalancerPoolExists(uuid)) {
+            logger.debug("No LoadBalancerPool has Been Defined");
+            return null;
+        }
+        return loadBalancerPoolDB.get(uuid);
+    }
+
+    @Override
+    public List<NeutronLoadBalancerPool> getAllNeutronLoadBalancerPools() {
+        Set<NeutronLoadBalancerPool> allLoadBalancerPools = new HashSet<NeutronLoadBalancerPool>();
+        for (Entry<String, NeutronLoadBalancerPool> entry : loadBalancerPoolDB.entrySet()) {
+            NeutronLoadBalancerPool loadBalancerPool = entry.getValue();
+            allLoadBalancerPools.add(loadBalancerPool);
+        }
+        logger.debug("Exiting getLoadBalancerPools, Found {} OpenStackLoadBalancerPool", allLoadBalancerPools.size());
+        List<NeutronLoadBalancerPool> ans = new ArrayList<NeutronLoadBalancerPool>();
+        ans.addAll(allLoadBalancerPools);
+        return ans;
+    }
+
+    @Override
+    public boolean addNeutronLoadBalancerPool(NeutronLoadBalancerPool input) {
+        if (neutronLoadBalancerPoolExists(input.getLoadBalancerPoolID())) {
+            return false;
+        }
+        loadBalancerPoolDB.putIfAbsent(input.getLoadBalancerPoolID(), input);
+        //TODO: add code to find INeutronLoadBalancerPoolAware services and call neutronLoadBalancerPoolCreated on them
+        return true;
+    }
+
+    @Override
+    public boolean removeNeutronLoadBalancerPool(String uuid) {
+        if (!neutronLoadBalancerPoolExists(uuid)) {
+            return false;
+        }
+        loadBalancerPoolDB.remove(uuid);
+        //TODO: add code to find INeutronLoadBalancerPoolAware services and call neutronLoadBalancerPoolDeleted on them
+        return true;
+    }
+
+    @Override
+    public boolean updateNeutronLoadBalancerPool(String uuid, NeutronLoadBalancerPool delta) {
+        if (!neutronLoadBalancerPoolExists(uuid)) {
+            return false;
+        }
+        NeutronLoadBalancerPool target = loadBalancerPoolDB.get(uuid);
+        return overwrite(target, delta);
+    }
+
+    @Override
+    public boolean neutronLoadBalancerPoolInUse(String loadBalancerPoolUUID) {
+        return !neutronLoadBalancerPoolExists(loadBalancerPoolUUID);
+    }
+
+    private void loadConfiguration() {
+        for (ConfigurationObject conf : configurationService.retrieveConfiguration(this, FILE_NAME)) {
+            NeutronLoadBalancerPool nn = (NeutronLoadBalancerPool) conf;
+            loadBalancerPoolDB.put(nn.getLoadBalancerPoolID(), nn);
+        }
+    }
+
+    @Override
+    public Status saveConfiguration() {
+        return configurationService.persistConfiguration(new ArrayList<ConfigurationObject>(loadBalancerPoolDB.values()),
+                FILE_NAME);
+    }
+
+    @Override
+    public Object readObject(ObjectInputStream ois) throws FileNotFoundException, IOException, ClassNotFoundException {
+        return ois.readObject();
+    }
+}
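Each implementation also persists its clustered map through IConfigurationContainerService: saveConfiguration() hands the map's values to persistConfiguration(), and loadConfiguration() replays whatever retrieveConfiguration() returns back into the map at startup. A condensed sketch of that round trip follows; it reuses only the calls already made in this commit, while the PoolConfigStore wrapper class itself is hypothetical.

    // Illustrative save/load round trip for one of the Neutron caches.
    import java.util.ArrayList;
    import java.util.concurrent.ConcurrentMap;

    import org.opendaylight.controller.configuration.ConfigurationObject;
    import org.opendaylight.controller.configuration.IConfigurationContainerService;
    import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
    import org.opendaylight.controller.sal.utils.IObjectReader;
    import org.opendaylight.controller.sal.utils.Status;

    public class PoolConfigStore {
        private final IConfigurationContainerService configurationService;
        private final IObjectReader reader;
        private final String fileName;

        public PoolConfigStore(IConfigurationContainerService configurationService,
                               IObjectReader reader, String fileName) {
            this.configurationService = configurationService;
            this.reader = reader;
            this.fileName = fileName;
        }

        // Writes the current cache contents to the per-container configuration file.
        public Status save(ConcurrentMap<String, NeutronLoadBalancerPool> db) {
            return configurationService.persistConfiguration(
                    new ArrayList<ConfigurationObject>(db.values()), fileName);
        }

        // Re-populates the cache from whatever was persisted earlier.
        public void load(ConcurrentMap<String, NeutronLoadBalancerPool> db) {
            for (ConfigurationObject conf : configurationService.retrieveConfiguration(reader, fileName)) {
                NeutronLoadBalancerPool pool = (NeutronLoadBalancerPool) conf;
                db.put(pool.getLoadBalancerPoolID(), pool);
            }
        }
    }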
diff --git a/opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerPoolMemberInterface.java b/opendaylight/networkconfiguration/neutron/implementation/src/main/java/org/opendaylight/controller/networkconfig/neutron/implementation/NeutronLoadBalancerPoolMemberInterface.java
new file mode 100644 (file)
index 0000000..7418bb2
--- /dev/null
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.implementation;
+
+import org.apache.felix.dm.Component;
+import org.opendaylight.controller.clustering.services.CacheConfigException;
+import org.opendaylight.controller.clustering.services.CacheExistException;
+import org.opendaylight.controller.clustering.services.IClusterContainerServices;
+import org.opendaylight.controller.clustering.services.IClusterServices;
+import org.opendaylight.controller.configuration.ConfigurationObject;
+import org.opendaylight.controller.configuration.IConfigurationContainerAware;
+import org.opendaylight.controller.configuration.IConfigurationContainerService;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
+import org.opendaylight.controller.sal.utils.IObjectReader;
+import org.opendaylight.controller.sal.utils.Status;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Dictionary;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+public class NeutronLoadBalancerPoolMemberInterface
+        implements INeutronLoadBalancerPoolMemberCRUD, IConfigurationContainerAware,
+        IObjectReader {
+    private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancerPoolMemberInterface.class);
+    private static final String FILE_NAME = "neutron.loadBalancerPoolMember.conf";
+    private String containerName = null;
+
+    private IClusterContainerServices clusterContainerService = null;
+    private IConfigurationContainerService configurationService;
+    private ConcurrentMap<String, NeutronLoadBalancerPoolMember> loadBalancerPoolMemberDB;
+
+    // methods needed for creating caches
+    void setClusterContainerService(IClusterContainerServices s) {
+        logger.debug("Cluster Service set");
+        clusterContainerService = s;
+    }
+
+    void unsetClusterContainerService(IClusterContainerServices s) {
+        if (clusterContainerService == s) {
+            logger.debug("Cluster Service removed!");
+            clusterContainerService = null;
+        }
+    }
+
+    public void setConfigurationContainerService(IConfigurationContainerService service) {
+        logger.trace("Configuration service set: {}", service);
+        configurationService = service;
+    }
+
+    public void unsetConfigurationContainerService(IConfigurationContainerService service) {
+        logger.trace("Configuration service removed: {}", service);
+        configurationService = null;
+    }
+
+    private void allocateCache() {
+        if (this.clusterContainerService == null) {
+            logger.error("un-initialized clusterContainerService, can't create cache");
+            return;
+        }
+        logger.debug("Creating Cache for Neutron LoadBalancerPoolMember");
+        try {
+            // neutron caches
+            this.clusterContainerService.createCache("neutronLoadBalancerPoolMembers",
+                    EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
+        } catch(CacheConfigException cce) {
+            logger.error("Cache couldn't be created for Neutron LoadBalancerPoolMember -  check cache mode");
+        } catch(CacheExistException cce) {
+            logger.error("Cache for Neutron LoadBalancerPoolMember already exists, destroy and recreate");
+        }
+        logger.debug("Cache successfully created for Neutron LoadBalancerPoolMember");
+    }
+
+    @SuppressWarnings({"unchecked"})
+    private void retrieveCache() {
+        if (clusterContainerService == null) {
+            logger.error("un-initialized clusterContainerService, can't retrieve cache");
+            return;
+        }
+
+        logger.debug("Retrieving cache for Neutron LoadBalancerPoolMember");
+        loadBalancerPoolMemberDB = (ConcurrentMap<String, NeutronLoadBalancerPoolMember>) clusterContainerService
+                .getCache("neutronLoadBalancerPoolMembers");
+        if (loadBalancerPoolMemberDB == null) {
+            logger.error("Cache couldn't be retrieved for Neutron LoadBalancerPoolMember");
+        }
+        logger.debug("Cache was successfully retrieved for Neutron LoadBalancerPoolMember");
+    }
+
+    private void destroyCache() {
+        if (clusterContainerService == null) {
+            logger.error("un-initialized clusterMger, can't destroy cache");
+            return;
+        }
+        logger.debug("Destroying Cache for HostTracker");
+        clusterContainerService.destroyCache("neutronLoadBalancerPoolMembers");
+    }
+
+    private void startUp() {
+        allocateCache();
+        retrieveCache();
+        loadConfiguration();
+    }
+
+    /**
+     * Function called by the dependency manager when all the required
+     * dependencies are satisfied
+     */
+    void init(Component c) {
+        Dictionary<?, ?> props = c.getServiceProperties();
+        if (props != null) {
+            this.containerName = (String) props.get("containerName");
+            logger.debug("Running containerName: {}", this.containerName);
+        } else {
+            // In the Global instance case the containerName is empty
+            this.containerName = "";
+        }
+        startUp();
+    }
+
+    /**
+     * Function called by the dependency manager when at least one dependency
+     * becomes unsatisfied or when the component is shutting down, for example
+     * because the bundle is being stopped.
+     */
+    void destroy() {
+        destroyCache();
+    }
+
+    /**
+     * Function called by dependency manager after "init ()" is called and after
+     * the services provided by the class are registered in the service registry
+     */
+    void start() {
+    }
+
+    /**
+     * Function called by the dependency manager before the services exported by
+     * the component are unregistered; this will be followed by a "destroy ()"
+     * call
+     */
+    void stop() {
+    }
+
+    // This method uses reflection to update an object from its delta.
+
+    private boolean overwrite(Object target, Object delta) {
+        Method[] methods = target.getClass().getMethods();
+
+        for (Method toMethod : methods) {
+            if (toMethod.getDeclaringClass().equals(target.getClass())
+                    && toMethod.getName().startsWith("set")) {
+
+                String toName = toMethod.getName();
+                String fromName = toName.replace("set", "get");
+
+                try {
+                    Method fromMethod = delta.getClass().getMethod(fromName);
+                    Object value = fromMethod.invoke(delta, (Object[]) null);
+                    if (value != null) {
+                        toMethod.invoke(target, value);
+                    }
+                } catch(Exception e) {
+                    e.printStackTrace();
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public boolean neutronLoadBalancerPoolMemberExists(String uuid) {
+        return loadBalancerPoolMemberDB.containsKey(uuid);
+    }
+
+    @Override
+    public NeutronLoadBalancerPoolMember getNeutronLoadBalancerPoolMember(String uuid) {
+        if (!neutronLoadBalancerPoolMemberExists(uuid)) {
+            logger.debug("No LoadBalancerPoolMember Have Been Defined");
+            return null;
+        }
+        return loadBalancerPoolMemberDB.get(uuid);
+    }
+
+    @Override
+    public List<NeutronLoadBalancerPoolMember> getAllNeutronLoadBalancerPoolMembers() {
+        Set<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = new HashSet<NeutronLoadBalancerPoolMember>();
+        for (Map.Entry<String, NeutronLoadBalancerPoolMember> entry : loadBalancerPoolMemberDB.entrySet()) {
+            NeutronLoadBalancerPoolMember loadBalancerPoolMember = entry.getValue();
+            allLoadBalancerPoolMembers.add(loadBalancerPoolMember);
+        }
+        logger.debug("Exiting getLoadBalancerPoolMembers, Found {} OpenStackLoadBalancerPoolMember",
+                allLoadBalancerPoolMembers.size());
+        List<NeutronLoadBalancerPoolMember> ans = new ArrayList<NeutronLoadBalancerPoolMember>();
+        ans.addAll(allLoadBalancerPoolMembers);
+        return ans;
+    }
+
+    @Override
+    public boolean addNeutronLoadBalancerPoolMember(NeutronLoadBalancerPoolMember input) {
+        if (neutronLoadBalancerPoolMemberExists(input.getPoolMemberID())) {
+            return false;
+        }
+        loadBalancerPoolMemberDB.putIfAbsent(input.getPoolMemberID(), input);
+        return true;
+    }
+
+    @Override
+    public boolean removeNeutronLoadBalancerPoolMember(String uuid) {
+        if (!neutronLoadBalancerPoolMemberExists(uuid)) {
+            return false;
+        }
+        loadBalancerPoolMemberDB.remove(uuid);
+        return true;
+    }
+
+    @Override
+    public boolean updateNeutronLoadBalancerPoolMember(String uuid, NeutronLoadBalancerPoolMember delta) {
+        if (!neutronLoadBalancerPoolMemberExists(uuid)) {
+            return false;
+        }
+        NeutronLoadBalancerPoolMember target = loadBalancerPoolMemberDB.get(uuid);
+        return overwrite(target, delta);
+    }
+
+    @Override
+    public boolean neutronLoadBalancerPoolMemberInUse(String loadBalancerPoolMemberID) {
+        return !neutronLoadBalancerPoolMemberExists(loadBalancerPoolMemberID);
+    }
+
+    private void loadConfiguration() {
+        for (ConfigurationObject conf : configurationService.retrieveConfiguration(this, FILE_NAME)) {
+            NeutronLoadBalancerPoolMember nn = (NeutronLoadBalancerPoolMember) conf;
+            loadBalancerPoolMemberDB.put(nn.getPoolMemberID(), nn);
+        }
+    }
+
+    @Override
+    public Status saveConfiguration() {
+        return configurationService.persistConfiguration(
+                new ArrayList<ConfigurationObject>(loadBalancerPoolMemberDB.values()),
+                FILE_NAME);
+    }
+
+    @Override
+    public Object readObject(ObjectInputStream ois) throws FileNotFoundException, IOException, ClassNotFoundException {
+        return ois.readObject();
+    }
+}
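Each of these classes also follows the same clustered-cache lifecycle: allocateCache() creates a NON_TRANSACTIONAL cache (tolerating an already-existing one), retrieveCache() fetches it as a ConcurrentMap, and destroyCache() drops it when the component is destroyed. The sketch below condenses that lifecycle into one helper; it uses only the IClusterContainerServices calls already used above, and the CacheHolder wrapper itself is illustrative rather than part of the commit.

    // Condensed allocate/retrieve/destroy lifecycle for one clustered cache.
    import java.util.EnumSet;
    import java.util.concurrent.ConcurrentMap;

    import org.opendaylight.controller.clustering.services.CacheConfigException;
    import org.opendaylight.controller.clustering.services.CacheExistException;
    import org.opendaylight.controller.clustering.services.IClusterContainerServices;
    import org.opendaylight.controller.clustering.services.IClusterServices;

    public class CacheHolder<V> {
        private final IClusterContainerServices cluster;
        private final String cacheName;
        private ConcurrentMap<String, V> map;

        public CacheHolder(IClusterContainerServices cluster, String cacheName) {
            this.cluster = cluster;
            this.cacheName = cacheName;
        }

        @SuppressWarnings("unchecked")
        public void start() {
            try {
                // Same cache mode the Neutron caches in this commit use.
                cluster.createCache(cacheName,
                        EnumSet.of(IClusterServices.cacheMode.NON_TRANSACTIONAL));
            } catch (CacheConfigException | CacheExistException e) {
                // Mirrors the originals: an already-existing cache is simply reused.
            }
            map = (ConcurrentMap<String, V>) cluster.getCache(cacheName);
        }

        public ConcurrentMap<String, V> map() {
            return map;
        }

        public void stop() {
            cluster.destroyCache(cacheName);
        }
    }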
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerAware.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerAware.java
new file mode 100644 (file)
index 0000000..e4aa5f3
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+/**
+ * This interface defines the methods a service that wishes to be aware of LoadBalancer objects needs to implement
+ *
+ */
+
+public interface INeutronLoadBalancerAware {
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancer can be created
+     *
+     * @param loadBalancer
+     *            instance of proposed new LoadBalancer object
+     * @return integer
+     *            the return value is understood to be a HTTP status code.  A return value outside of 200 through 299
+     *            results in the create operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canCreateNeutronLoadBalancer(NeutronLoadBalancer loadBalancer);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancer has been created
+     *
+     * @param loadBalancer
+     *            instance of new LoadBalancer object
+     * @return void
+     */
+    public void neutronLoadBalancerCreated(NeutronLoadBalancer loadBalancer);
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancer can be changed using the specified
+     * delta
+     *
+     * @param delta
+     *            updates to the loadBalancer object using patch semantics
+     * @param original
+     *            instance of the LoadBalancer object to be updated
+     * @return integer
+     *            the return value is understood to be a HTTP status code.  A return value outside of 200 through 299
+     *            results in the update operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canUpdateNeutronLoadBalancer(NeutronLoadBalancer delta, NeutronLoadBalancer original);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancer has been updated
+     *
+     * @param loadBalancer
+     *            instance of modified LoadBalancer object
+     * @return void
+     */
+    public void neutronLoadBalancerUpdated(NeutronLoadBalancer loadBalancer);
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancer can be deleted
+     *
+     * @param loadBalancer
+     *            instance of the LoadBalancer object to be deleted
+     * @return integer
+     *            the return value is understood to be a HTTP status code.  A return value outside of 200 through 299
+     *            results in the delete operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canDeleteNeutronLoadBalancer(NeutronLoadBalancer loadBalancer);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancer has been deleted
+     *
+     * @param loadBalancer
+     *            instance of deleted LoadBalancer object
+     * @return void
+     */
+    public void neutronLoadBalancerDeleted(NeutronLoadBalancer loadBalancer);
+}
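Implementations of this interface gate northbound operations by returning an HTTP status code from the can* methods (anything outside 200 through 299 aborts the operation), while the *Created/*Updated/*Deleted callbacks fire after the store has changed. A minimal, permissive implementation sketch follows; the NoOpLoadBalancerAware class name and its logging are illustrative only and not part of this commit.

    // Minimal, permissive INeutronLoadBalancerAware implementation sketch.
    import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerAware;
    import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancer;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class NoOpLoadBalancerAware implements INeutronLoadBalancerAware {
        private static final Logger logger = LoggerFactory.getLogger(NoOpLoadBalancerAware.class);

        @Override
        public int canCreateNeutronLoadBalancer(NeutronLoadBalancer loadBalancer) {
            return 200; // any 2xx lets the create proceed
        }

        @Override
        public void neutronLoadBalancerCreated(NeutronLoadBalancer loadBalancer) {
            logger.debug("LoadBalancer created: {}", loadBalancer);
        }

        @Override
        public int canUpdateNeutronLoadBalancer(NeutronLoadBalancer delta, NeutronLoadBalancer original) {
            return 200;
        }

        @Override
        public void neutronLoadBalancerUpdated(NeutronLoadBalancer loadBalancer) {
            logger.debug("LoadBalancer updated: {}", loadBalancer);
        }

        @Override
        public int canDeleteNeutronLoadBalancer(NeutronLoadBalancer loadBalancer) {
            return 200;
        }

        @Override
        public void neutronLoadBalancerDeleted(NeutronLoadBalancer loadBalancer) {
            logger.debug("LoadBalancer deleted: {}", loadBalancer);
        }
    }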
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerCRUD.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerCRUD.java
new file mode 100644 (file)
index 0000000..a2ce41e
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import java.util.List;
+
+/**
+ * This interface defines the methods for CRUD of NB OpenStack LoadBalancer objects
+ *
+ */
+
+public interface INeutronLoadBalancerCRUD {
+    /**
+     * Applications call this interface method to determine if a particular
+     * LoadBalancer object exists
+     *
+     * @param uuid
+     *            UUID of the LoadBalancer object
+     * @return boolean
+     */
+
+    public boolean neutronLoadBalancerExists(String uuid);
+
+    /**
+     * Applications call this interface method to return a particular
+     * LoadBalancer object
+     *
+     * @param uuid
+     *            UUID of the LoadBalancer object
+     * @return {@link NeutronLoadBalancer}
+     *          OpenStackLoadBalancer class
+     */
+
+    public NeutronLoadBalancer getNeutronLoadBalancer(String uuid);
+
+    /**
+     * Applications call this interface method to return all LoadBalancer objects
+     *
+     * @return List of OpenStackLoadBalancer objects
+     */
+
+    public List<NeutronLoadBalancer> getAllNeutronLoadBalancers();
+
+    /**
+     * Applications call this interface method to add a LoadBalancer object to the
+     * concurrent map
+     *
+     * @param input
+     *            OpenStackLoadBalancer object
+     * @return boolean on whether the object was added or not
+     */
+
+    public boolean addNeutronLoadBalancer(NeutronLoadBalancer input);
+
+    /**
+     * Applications call this interface method to remove a Neutron LoadBalancer object from the
+     * concurrent map
+     *
+     * @param uuid
+     *            identifier for the LoadBalancer object
+     * @return boolean on whether the object was removed or not
+     */
+
+    public boolean removeNeutronLoadBalancer(String uuid);
+
+    /**
+     * Applications call this interface method to edit a LoadBalancer object
+     *
+     * @param uuid
+     *            identifier of the LoadBalancer object
+     * @param delta
+     *            OpenStackLoadBalancer object containing changes to apply
+     * @return boolean on whether the object was updated or not
+     */
+
+    public boolean updateNeutronLoadBalancer(String uuid, NeutronLoadBalancer delta);
+
+    /**
+     * Applications call this interface method to see if a LoadBalancer object is in use
+     *
+     * @param uuid
+     *            identifier of the LoadBalancer object
+     * @return boolean on whether the LoadBalancer object is in use or not
+     */
+
+    public boolean neutronLoadBalancerInUse(String uuid);
+
+}
\ No newline at end of file
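A typical consumer drives this CRUD contract by checking existence before mutating and honouring the in-use flag before removal. The helper below is an illustrative sketch built solely from the methods declared above; upsertLoadBalancer/removeIfUnused and the separate uuid argument are conveniences assumed for the example, not part of this commit.

    // Illustrative consumer of the INeutronLoadBalancerCRUD contract.
    import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerCRUD;
    import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancer;

    public final class LoadBalancerCrudExample {

        private LoadBalancerCrudExample() {
        }

        // Adds the object if it is unknown, otherwise applies it as a patch-style delta.
        public static boolean upsertLoadBalancer(INeutronLoadBalancerCRUD crud,
                                                 String uuid,
                                                 NeutronLoadBalancer loadBalancer) {
            if (!crud.neutronLoadBalancerExists(uuid)) {
                return crud.addNeutronLoadBalancer(loadBalancer);
            }
            return crud.updateNeutronLoadBalancer(uuid, loadBalancer);
        }

        // Removes the object only when the store reports it is not in use.
        public static boolean removeIfUnused(INeutronLoadBalancerCRUD crud, String uuid) {
            if (crud.neutronLoadBalancerInUse(uuid)) {
                return false;
            }
            return crud.removeNeutronLoadBalancer(uuid);
        }
    }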
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerHealthMonitorAware.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerHealthMonitorAware.java
new file mode 100644 (file)
index 0000000..7194da3
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+/**
+ * This interface defines the methods a service that wishes to be aware of LoadBalancerHealthMonitor objects needs to implement
+ *
+ */
+
+public interface INeutronLoadBalancerHealthMonitorAware {
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancerHealthMonitor can be created
+     *
+     * @param loadBalancerHealthMonitor
+     *            instance of proposed new LoadBalancerHealthMonitor object
+     * @return integer
+     *            the return value is understood to be a HTTP status code.  A return value outside of 200 through 299
+     *            results in the create operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canCreateNeutronLoadBalancerHealthMonitor(NeutronLoadBalancerHealthMonitor loadBalancerHealthMonitor);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancerHealthMonitor has been created
+     *
+     * @param loadBalancerHealthMonitor
+     *            instance of new LoadBalancerHealthMonitor object
+     * @return void
+     */
+    public void neutronLoadBalancerHealthMonitorCreated(NeutronLoadBalancerHealthMonitor loadBalancerHealthMonitor);
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancerHealthMonitor can be changed using the specified
+     * delta
+     *
+     * @param delta
+     *            updates to the loadBalancerHealthMonitor object using patch semantics
+     * @param original
+     *            instance of the LoadBalancerHealthMonitor object to be updated
+     * @return integer
+     *            the return value is understood to be a HTTP status code.  A return value outside of 200 through 299
+     *            results in the update operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canUpdateNeutronLoadBalancerHealthMonitor(NeutronLoadBalancerHealthMonitor delta,
+            NeutronLoadBalancerHealthMonitor original);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancerHealthMonitor has been updated
+     *
+     * @param loadBalancerHealthMonitor
+     *            instance of modified LoadBalancerHealthMonitor object
+     * @return void
+     */
+    public void neutronLoadBalancerHealthMonitorUpdated(NeutronLoadBalancerHealthMonitor loadBalancerHealthMonitor);
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancerHealthMonitor can be deleted
+     *
+     * @param loadBalancerHealthMonitor
+     *            instance of the LoadBalancerHealthMonitor object to be deleted
+     * @return integer
+     *            the return value is understood to be a HTTP status code.  A return value outside of 200 through 299
+     *            results in the delete operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canDeleteNeutronLoadBalancerHealthMonitor(NeutronLoadBalancerHealthMonitor loadBalancerHealthMonitor);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancerHealthMonitor has been deleted
+     *
+     * @param loadBalancerHealthMonitor
+     *            instance of deleted LoadBalancerHealthMonitor object
+     * @return void
+     */
+    public void neutronLoadBalancerHealthMonitorDeleted(NeutronLoadBalancerHealthMonitor loadBalancerHealthMonitor);
+}
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerHealthMonitorCRUD.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerHealthMonitorCRUD.java
new file mode 100644 (file)
index 0000000..7838000
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import java.util.List;
+
+/**
+ * This interface defines the methods for CRUD of NB OpenStack LoadBalancerHealthMonitor objects
+ *
+ */
+
+public interface INeutronLoadBalancerHealthMonitorCRUD {
+    /**
+     * Applications call this interface method to determine if a particular
+     * LoadBalancerHealthMonitor object exists
+     *
+     * @param uuid
+     *            UUID of the LoadBalancerHealthMonitor object
+     * @return boolean
+     */
+
+    public boolean neutronLoadBalancerHealthMonitorExists(String uuid);
+
+    /**
+     * Applications call this interface method to return a particular
+     * LoadBalancerHealthMonitor object
+     *
+     * @param uuid
+     *            UUID of the LoadBalancerHealthMonitor object
+     * @return {@link NeutronLoadBalancerHealthMonitor}
+     *          OpenStackLoadBalancerHealthMonitor class
+     */
+
+    public NeutronLoadBalancerHealthMonitor getNeutronLoadBalancerHealthMonitor(String uuid);
+
+    /**
+     * Applications call this interface method to return all LoadBalancerHealthMonitor objects
+     *
+     * @return List of OpenStackLoadBalancerHealthMonitor objects
+     */
+
+    public List<NeutronLoadBalancerHealthMonitor> getAllNeutronLoadBalancerHealthMonitors();
+
+    /**
+     * Applications call this interface method to add a LoadBalancerHealthMonitor object to the
+     * concurrent map
+     *
+     * @param input
+     *            OpenStackLoadBalancerHealthMonitor object
+     * @return boolean on whether the object was added or not
+     */
+
+    public boolean addNeutronLoadBalancerHealthMonitor(NeutronLoadBalancerHealthMonitor input);
+
+    /**
+     * Applications call this interface method to remove a Neutron LoadBalancerHealthMonitor object from the
+     * concurrent map
+     *
+     * @param uuid
+     *            identifier for the LoadBalancerHealthMonitor object
+     * @return boolean on whether the object was removed or not
+     */
+
+    public boolean removeNeutronLoadBalancerHealthMonitor(String uuid);
+
+    /**
+     * Applications call this interface method to edit a LoadBalancerHealthMonitor object
+     *
+     * @param uuid
+     *            identifier of the LoadBalancerHealthMonitor object
+     * @param delta
+     *            OpenStackLoadBalancerHealthMonitor object containing changes to apply
+     * @return boolean on whether the object was updated or not
+     */
+
+    public boolean updateNeutronLoadBalancerHealthMonitor(String uuid, NeutronLoadBalancerHealthMonitor delta);
+
+    /**
+     * Applications call this interface method to see if a LoadBalancerHealthMonitor object is in use
+     *
+     * @param uuid
+     *            identifier of the LoadBalancerHealthMonitor object
+     * @return boolean on whether the LoadBalancerHealthMonitor object is in use or not
+     */
+
+    public boolean neutronLoadBalancerHealthMonitorInUse(String uuid);
+
+}
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerListenerAware.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerListenerAware.java
new file mode 100644 (file)
index 0000000..417419f
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+/**
+ * This interface defines the methods a service that wishes to be aware of LoadBalancerListener objects needs to implement
+ *
+ */
+
+public interface INeutronLoadBalancerListenerAware {
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancerListener can be created
+     *
+     * @param loadBalancerListener
+     *            instance of proposed new LoadBalancerListener object
+     * @return integer
+     *            the return value is understood to be a HTTP status code.  A return value outside of 200 through 299
+     *            results in the create operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canCreateNeutronLoadBalancerListener(NeutronLoadBalancerListener loadBalancerListener);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancerListener has been created
+     *
+     * @param loadBalancerListener
+     *            instance of new LoadBalancerListener object
+     * @return void
+     */
+    public void neutronLoadBalancerListenerCreated(NeutronLoadBalancerListener loadBalancerListener);
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancerListener can be changed using the specified
+     * delta
+     *
+     * @param delta
+     *            updates to the loadBalancerListener object using patch semantics
+     * @param original
+     *            instance of the LoadBalancerListener object to be updated
+     * @return integer
+     *            the return value is understood to be a HTTP status code.  A return value outside of 200 through 299
+     *            results in the update operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canUpdateNeutronLoadBalancerListener(NeutronLoadBalancerListener delta,
+            NeutronLoadBalancerListener original);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancerListener has been updated
+     *
+     * @param loadBalancerListener
+     *            instance of modified LoadBalancerListener object
+     * @return void
+     */
+    public void neutronLoadBalancerListenerUpdated(NeutronLoadBalancerListener loadBalancerListener);
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancerListener can be deleted
+     *
+     * @param loadBalancerListener
+     *            instance of the LoadBalancerListener object to be deleted
+     * @return integer
+     *            the return value is understood to be a HTTP status code.  A return value outside of 200 through 299
+     *            results in the delete operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canDeleteNeutronLoadBalancerListener(NeutronLoadBalancerListener loadBalancerListener);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancerListener has been deleted
+     *
+     * @param loadBalancerListener
+     *            instance of deleted LoadBalancerListener object
+     * @return void
+     */
+    public void neutronLoadBalancerListenerDeleted(NeutronLoadBalancerListener loadBalancerListener);
+}
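The NeutronLoadBalancerListenerInterface implementation earlier in this change still carries TODOs for locating INeutronLoadBalancerListenerAware services and invoking these callbacks. The sketch below shows what that create-time dispatch could look like; the ListenerNotifier class is illustrative, and the list of aware services is passed in by the caller because the actual service-lookup mechanism is outside this commit.

    // Illustrative create-notification dispatch over registered aware services.
    import java.util.List;

    import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerListenerAware;
    import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerListener;

    public final class ListenerNotifier {

        private ListenerNotifier() {
        }

        // Asks every aware service whether the listener may be created and, if all of
        // them answer with a 2xx status, fires the created callback on each of them.
        // Returns the first non-2xx status, or 200 on success.
        public static int createWithNotifications(List<INeutronLoadBalancerListenerAware> services,
                                                  NeutronLoadBalancerListener listener) {
            for (INeutronLoadBalancerListenerAware service : services) {
                int status = service.canCreateNeutronLoadBalancerListener(listener);
                if (status < 200 || status > 299) {
                    return status; // veto: abort before anything is stored
                }
            }
            for (INeutronLoadBalancerListenerAware service : services) {
                service.neutronLoadBalancerListenerCreated(listener);
            }
            return 200;
        }
    }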
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerListenerCRUD.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerListenerCRUD.java
new file mode 100644 (file)
index 0000000..c160f8e
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import java.util.List;
+
+/**
+ * This interface defines the methods for CRUD of NB OpenStack LoadBalancerListener objects
+ *
+ */
+
+public interface INeutronLoadBalancerListenerCRUD {
+    /**
+     * Applications call this interface method to determine if a particular
+     * LoadBalancerListener object exists
+     *
+     * @param uuid
+     *            UUID of the LoadBalancerListener object
+     * @return boolean
+     */
+
+    public boolean neutronLoadBalancerListenerExists(String uuid);
+
+    /**
+     * Applications call this interface method to return a particular
+     * LoadBalancerListener object
+     *
+     * @param uuid
+     *            UUID of the LoadBalancerListener object
+     * @return {@link NeutronLoadBalancerListener}
+     *          OpenStackLoadBalancerListener class
+     */
+
+    public NeutronLoadBalancerListener getNeutronLoadBalancerListener(String uuid);
+
+    /**
+     * Applications call this interface method to return all LoadBalancerListener objects
+     *
+     * @return List of OpenStackLoadBalancerListener objects
+     */
+
+    public List<NeutronLoadBalancerListener> getAllNeutronLoadBalancerListeners();
+
+    /**
+     * Applications call this interface method to add a LoadBalancerListener object to the
+     * concurrent map
+     *
+     * @param input
+     *            OpenStackLoadBalancerListener object
+     * @return boolean on whether the object was added or not
+     */
+
+    public boolean addNeutronLoadBalancerListener(NeutronLoadBalancerListener input);
+
+    /**
+     * Applications call this interface method to remove a Neutron LoadBalancerListener object from the
+     * concurrent map
+     *
+     * @param uuid
+     *            identifier for the LoadBalancerListener object
+     * @return boolean on whether the object was removed or not
+     */
+
+    public boolean removeNeutronLoadBalancerListener(String uuid);
+
+    /**
+     * Applications call this interface method to edit a LoadBalancerListener object
+     *
+     * @param uuid
+     *            identifier of the LoadBalancerListener object
+     * @param delta
+     *            OpenStackLoadBalancerListener object containing changes to apply
+     * @return boolean on whether the object was updated or not
+     */
+
+    public boolean updateNeutronLoadBalancerListener(String uuid, NeutronLoadBalancerListener delta);
+
+    /**
+     * Applications call this interface method to see if a LoadBalancerListener object is in use
+     *
+     * @param uuid
+     *            identifier of the LoadBalancerListener object
+     * @return boolean on whether the LoadBalancerListener object is in use or not
+     */
+
+    public boolean neutronLoadBalancerListenerInUse(String uuid);
+
+}
\ No newline at end of file
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolAware.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolAware.java
new file mode 100644 (file)
index 0000000..16c7d37
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+/**
+ * This interface defines the methods a service that wishes to be aware of LoadBalancerPool objects needs to implement
+ *
+ */
+
+public interface INeutronLoadBalancerPoolAware {
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancerPool can be created
+     *
+     * @param loadBalancerPool
+     *            instance of proposed new LoadBalancerPool object
+     * @return integer
+     *            the return value is understood to be a HTTP status code.  A return value outside of 200 through 299
+     *            results in the create operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canCreateNeutronLoadBalancerPool(NeutronLoadBalancerPool loadBalancerPool);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancerPool has been created
+     *
+     * @param loadBalancerPool
+     *            instance of new LoadBalancerPool object
+     * @return void
+     */
+    public void neutronLoadBalancerPoolCreated(NeutronLoadBalancerPool loadBalancerPool);
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancerPool can be changed using the specified
+     * delta
+     *
+     * @param delta
+     *            updates to the loadBalancerPool object using patch semantics
+     * @param original
+     *            instance of the LoadBalancerPool object to be updated
+     * @return integer
+     *            the return value is understood to be a HTTP status code.  A return value outside of 200 through 299
+     *            results in the update operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canUpdateNeutronLoadBalancerPool(NeutronLoadBalancerPool delta, NeutronLoadBalancerPool original);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancerPool has been updated
+     *
+     * @param loadBalancerPool
+     *            instance of modified LoadBalancerPool object
+     * @return void
+     */
+    public void neutronLoadBalancerPoolUpdated(NeutronLoadBalancerPool loadBalancerPool);
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancerPool can be deleted
+     *
+     * @param loadBalancerPool
+     *            instance of the LoadBalancerPool object to be deleted
+     * @return integer
+     *            the return value is understood to be a HTTP status code.  A return value outside of 200 through 299
+     *            results in the delete operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canDeleteNeutronLoadBalancerPool(NeutronLoadBalancerPool loadBalancerPool);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancerPool has been deleted
+     *
+     * @param loadBalancerPool
+     *            instance of deleted LoadBalancerPool object
+     * @return void
+     */
+    public void neutronLoadBalancerPoolDeleted(NeutronLoadBalancerPool loadBalancerPool);
+}
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolCRUD.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolCRUD.java
new file mode 100644 (file)
index 0000000..9614448
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import java.util.List;
+
+/**
+ * This interface defines the methods for CRUD of NB OpenStack LoadBalancerPool objects
+ *
+ */
+
+public interface INeutronLoadBalancerPoolCRUD {
+    /**
+     * Applications call this interface method to determine if a particular
+     * LoadBalancerPool object exists
+     *
+     * @param uuid
+     *            UUID of the LoadBalancerPool object
+     * @return boolean
+     */
+
+    public boolean neutronLoadBalancerPoolExists(String uuid);
+
+    /**
+     * Applications call this interface method to return a particular
+     * LoadBalancerPool object
+     *
+     * @param uuid
+     *            UUID of the LoadBalancerPool object
+     * @return {@link NeutronLoadBalancerPool}
+     *          OpenStackLoadBalancerPool class
+     */
+
+    public NeutronLoadBalancerPool getNeutronLoadBalancerPool(String uuid);
+
+    /**
+     * Applications call this interface method to return all LoadBalancerPool objects
+     *
+     * @return List of OpenStackLoadBalancerPool objects
+     */
+
+    public List<NeutronLoadBalancerPool> getAllNeutronLoadBalancerPools();
+
+    /**
+     * Applications call this interface method to add a LoadBalancerPool object to the
+     * concurrent map
+     *
+     * @param input
+     *            OpenStackLoadBalancerPool object
+     * @return boolean on whether the object was added or not
+     */
+
+    public boolean addNeutronLoadBalancerPool(NeutronLoadBalancerPool input);
+
+    /**
+     * Applications call this interface method to remove a Neutron LoadBalancerPool object from the
+     * concurrent map
+     *
+     * @param uuid
+     *            identifier for the LoadBalancerPool object
+     * @return boolean on whether the object was removed or not
+     */
+
+    public boolean removeNeutronLoadBalancerPool(String uuid);
+
+    /**
+     * Applications call this interface method to edit a LoadBalancerPool object
+     *
+     * @param uuid
+     *            identifier of the LoadBalancerPool object
+     * @param delta
+     *            OpenStackLoadBalancerPool object containing changes to apply
+     * @return boolean on whether the object was updated or not
+     */
+
+    public boolean updateNeutronLoadBalancerPool(String uuid, NeutronLoadBalancerPool delta);
+
+    /**
+     * Applications call this interface method to see if a LoadBalancerPool object is in use
+     *
+     * @param uuid
+     *            identifier of the LoadBalancerPool object
+     * @return boolean on whether the LoadBalancerPool object is in use or not
+     */
+
+    public boolean neutronLoadBalancerPoolInUse(String uuid);
+
+}
\ No newline at end of file
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolMemberAware.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolMemberAware.java
new file mode 100644 (file)
index 0000000..0a1da77
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.networkconfig.neutron;
+
+public interface INeutronLoadBalancerPoolMemberAware {
+
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancerPoolMember can be created
+     *
+     * @param loadBalancerPoolMember
+     *            instance of proposed new LoadBalancerPoolMember object
+     * @return integer
+     *            the return value is understood to be an HTTP status code.  A return value outside of 200 through 299
+     *            results in the create operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canCreateNeutronLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancerPoolMember has been created
+     *
+     * @param loadBalancerPoolMember
+     *            instance of new LoadBalancerPoolMember object
+     */
+    public void neutronLoadBalancerPoolMemberCreated(NeutronLoadBalancerPoolMember loadBalancerPoolMember);
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancerPoolMember can be changed using the specified
+     * delta
+     *
+     * @param delta
+     *            updates to the loadBalancerPoolMember object using patch semantics
+     * @param original
+     *            instance of the LoadBalancerPoolMember object to be updated
+     * @return integer
+     *            the return value is understood to be an HTTP status code.  A return value outside of 200 through 299
+     *            results in the update operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canUpdateNeutronLoadBalancerPoolMember(NeutronLoadBalancerPoolMember delta,
+            NeutronLoadBalancerPoolMember original);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancerPoolMember has been updated
+     *
+     * @param loadBalancerPoolMember
+     *            instance of modified LoadBalancerPoolMember object
+     */
+    public void neutronLoadBalancerPoolMemberUpdated(NeutronLoadBalancerPoolMember loadBalancerPoolMember);
+
+    /**
+     * Services provide this interface method to indicate if the specified loadBalancerPoolMember can be deleted
+     *
+     * @param loadBalancerPoolMember
+     *            instance of the LoadBalancerPoolMember object to be deleted
+     * @return integer
+     *            the return value is understood to be an HTTP status code.  A return value outside of 200 through 299
+     *            results in the delete operation being interrupted and the returned status value reflected in the
+     *            HTTP response.
+     */
+    public int canDeleteNeutronLoadBalancerPoolMember(NeutronLoadBalancerPoolMember loadBalancerPoolMember);
+
+    /**
+     * Services provide this interface method for taking action after a loadBalancerPoolMember has been deleted
+     *
+     * @param loadBalancerPoolMember
+     *            instance of deleted LoadBalancerPoolMember object
+     */
+    public void NeutronLoadBalancerPoolMemberDeleted(NeutronLoadBalancerPoolMember loadBalancerPoolMember);
+}
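
The Aware contract above follows the usual Neutron listener pattern: the can* callbacks veto an operation by returning a non-2xx HTTP status code, and the past-tense callbacks fire after the fact. A minimal sketch of a consuming service is shown below; the class name, the logging and the rejection rule are invented for illustration only.

    import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberAware;
    import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;

    // Illustrative listener; it would be registered as an OSGi service in practice.
    public class LoggingPoolMemberListener implements INeutronLoadBalancerPoolMemberAware {

        public int canCreateNeutronLoadBalancerPoolMember(NeutronLoadBalancerPoolMember member) {
            // Veto members without an address; any status outside 200-299 aborts the create.
            return (member.getPoolMemberAddress() == null) ? 400 : 200;
        }

        public void neutronLoadBalancerPoolMemberCreated(NeutronLoadBalancerPoolMember member) {
            System.out.println("created pool member " + member.getPoolMemberID());
        }

        public int canUpdateNeutronLoadBalancerPoolMember(NeutronLoadBalancerPoolMember delta,
                NeutronLoadBalancerPoolMember original) {
            return 200;
        }

        public void neutronLoadBalancerPoolMemberUpdated(NeutronLoadBalancerPoolMember member) {
            System.out.println("updated pool member " + member.getPoolMemberID());
        }

        public int canDeleteNeutronLoadBalancerPoolMember(NeutronLoadBalancerPoolMember member) {
            return 200;
        }

        public void NeutronLoadBalancerPoolMemberDeleted(NeutronLoadBalancerPoolMember member) {
            System.out.println("deleted pool member " + member.getPoolMemberID());
        }
    }
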
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolMemberCRUD.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolMemberCRUD.java
new file mode 100644 (file)
index 0000000..c1f5c70
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import java.util.List;
+
+public interface INeutronLoadBalancerPoolMemberCRUD {
+
+    /**
+     * Applications call this interface method to determine if a particular
+     * NeutronLoadBalancerPoolMember object exists
+     *
+     * @param uuid
+     *            UUID of the NeutronLoadBalancerPoolMember object
+     * @return boolean
+     */
+
+    public boolean neutronLoadBalancerPoolMemberExists(String uuid);
+
+    /**
+     * Applications call this interface method to return a particular
+     * NeutronLoadBalancerPoolMember object, if it exists
+     *
+     * @param uuid
+     *            UUID of the NeutronLoadBalancerPoolMember object
+     * @return {@link org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember}
+     *          OpenStackNeutronLoadBalancerPoolMember class
+     */
+
+    public NeutronLoadBalancerPoolMember getNeutronLoadBalancerPoolMember(String uuid);
+
+    /**
+     * Applications call this interface method to return all NeutronLoadBalancerPoolMember objects
+     *
+     * @return List of OpenStackNeutronLoadBalancerPoolMember objects
+     */
+
+    public List<NeutronLoadBalancerPoolMember> getAllNeutronLoadBalancerPoolMembers();
+
+    /**
+     * Applications call this interface method to add a NeutronLoadBalancerPoolMember object to the
+     * concurrent map
+     *
+     * @param input
+     *            OpenStackNeutronLoadBalancerPoolMember object
+     * @return boolean on whether the object was added or not
+     */
+
+    public boolean addNeutronLoadBalancerPoolMember(NeutronLoadBalancerPoolMember input);
+
+    /**
+     * Applications call this interface method to remove a NeutronLoadBalancerPoolMember object from the
+     * concurrent map
+     *
+     * @param uuid
+     *            identifier for the NeutronLoadBalancerPoolMember object
+     * @return boolean on whether the object was removed or not
+     */
+
+    public boolean removeNeutronLoadBalancerPoolMember(String uuid);
+
+    /**
+     * Applications call this interface method to edit a NeutronLoadBalancerPoolMember object
+     *
+     * @param uuid
+     *            identifier of the NeutronLoadBalancerPoolMember object
+     * @param delta
+     *            OpenStackNeutronLoadBalancerPoolMember object containing changes to apply
+     * @return boolean on whether the object was updated or not
+     */
+
+    public boolean updateNeutronLoadBalancerPoolMember(String uuid, NeutronLoadBalancerPoolMember delta);
+
+    /**
+     * Applications call this interface method to determine if a NeutronLoadBalancerPoolMember object is in use
+     *
+     * @param uuid
+     *            identifier of the NeutronLoadBalancerPoolMember object
+     * @return boolean on whether the NeutronLoadBalancerPoolMember object is in use
+     * or not
+     */
+
+    public boolean neutronLoadBalancerPoolMemberInUse(String uuid);
+
+}
\ No newline at end of file
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolMemberRequest.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/INeutronLoadBalancerPoolMemberRequest.java
new file mode 100644 (file)
index 0000000..d8c5eb9
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import javax.xml.bind.annotation.XmlElement;
+import java.util.List;
+
+public class INeutronLoadBalancerPoolMemberRequest {
+
+    /**
+     * See OpenStack Network API v2.0 Reference for a description of the fields:
+     * http://docs.openstack.org/api/openstack-network/2.0/content/
+     */
+
+    @XmlElement(name="member")
+    NeutronLoadBalancerPoolMember singletonLoadBalancerPoolMember;
+
+    @XmlElement(name="members")
+    List<NeutronLoadBalancerPoolMember> bulkRequest;
+
+    INeutronLoadBalancerPoolMemberRequest() {
+    }
+
+    public INeutronLoadBalancerPoolMemberRequest(List<NeutronLoadBalancerPoolMember> bulk) {
+        bulkRequest = bulk;
+        singletonLoadBalancerPoolMember = null;
+    }
+
+    INeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) {
+        singletonLoadBalancerPoolMember = group;
+    }
+
+    public List<NeutronLoadBalancerPoolMember> getBulk() {
+        return bulkRequest;
+    }
+
+    public NeutronLoadBalancerPoolMember getSingleton() {
+        return singletonLoadBalancerPoolMember;
+    }
+
+    public boolean isSingleton() {
+        return (singletonLoadBalancerPoolMember != null);
+    }
+}
\ No newline at end of file
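
This wrapper lets JAXB map either a single "member" element or a bulk "members" array onto one request object; isSingleton() tells the caller which shape arrived. A consumer would typically branch on it roughly as follows (the helper name is illustrative, not part of this change):

    import java.util.List;
    import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberRequest;
    import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;

    // Illustrative helper only.
    public class PoolMemberRequestInspector {
        public static int countMembers(INeutronLoadBalancerPoolMemberRequest request) {
            if (request.isSingleton()) {
                return 1;                                // body carried a single "member" object
            }
            List<NeutronLoadBalancerPoolMember> bulk = request.getBulk();
            return (bulk == null) ? 0 : bulk.size();     // body carried a "members" array
        }
    }
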
index 6ce5499cdf9a2cc99ffda04d75d13a11d4c11bbb..472debe38312b45df11094b2646b39cfc485d19c 100644 (file)
@@ -61,4 +61,29 @@ public class NeutronCRUDInterfaces {
         INeutronFirewallRuleCRUD answer = (INeutronFirewallRuleCRUD) ServiceHelper.getGlobalInstance(INeutronFirewallRuleCRUD.class, o);
         return answer;
     }
+
+    public static INeutronLoadBalancerCRUD getINeutronLoadBalancerCRUD(Object o) {
+        INeutronLoadBalancerCRUD answer = (INeutronLoadBalancerCRUD) ServiceHelper.getGlobalInstance(INeutronLoadBalancerCRUD.class, o);
+        return answer;
+    }
+
+    public static INeutronLoadBalancerPoolCRUD getINeutronLoadBalancerPoolCRUD(Object o) {
+        INeutronLoadBalancerPoolCRUD answer = (INeutronLoadBalancerPoolCRUD) ServiceHelper.getGlobalInstance(INeutronLoadBalancerPoolCRUD.class, o);
+        return answer;
+    }
+
+    public static INeutronLoadBalancerListenerCRUD getINeutronLoadBalancerListenerCRUD(Object o) {
+        INeutronLoadBalancerListenerCRUD answer = (INeutronLoadBalancerListenerCRUD) ServiceHelper.getGlobalInstance(INeutronLoadBalancerListenerCRUD.class, o);
+        return answer;
+    }
+
+    public static INeutronLoadBalancerHealthMonitorCRUD getINeutronLoadBalancerHealthMonitorCRUD(Object o) {
+        INeutronLoadBalancerHealthMonitorCRUD answer = (INeutronLoadBalancerHealthMonitorCRUD) ServiceHelper.getGlobalInstance(INeutronLoadBalancerHealthMonitorCRUD.class, o);
+        return answer;
+    }
+
+    public static INeutronLoadBalancerPoolMemberCRUD getINeutronLoadBalancerPoolMemberCRUD(Object o) {
+        INeutronLoadBalancerPoolMemberCRUD answer = (INeutronLoadBalancerPoolMemberCRUD) ServiceHelper.getGlobalInstance(INeutronLoadBalancerPoolMemberCRUD.class, o);
+        return answer;
+    }
 }
\ No newline at end of file
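
Each new getI...CRUD helper resolves the globally registered service through ServiceHelper, mirroring the existing firewall lookups above. A hedged sketch of a consumer follows; the class name and error handling are illustrative.

    import java.util.List;
    import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
    import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
    import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;

    // Illustrative consumer; the object passed to the lookup is conventionally the caller itself.
    public class LoadBalancerPoolReader {
        public List<NeutronLoadBalancerPool> readAllPools() {
            INeutronLoadBalancerPoolCRUD poolCRUD =
                    NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
            if (poolCRUD == null) {
                // no provider registered, e.g. the neutron implementation bundle is not running
                throw new IllegalStateException("LoadBalancerPool CRUD service unavailable");
            }
            return poolCRUD.getAllNeutronLoadBalancerPools();
        }
    }
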
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancer.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancer.java
new file mode 100644 (file)
index 0000000..15544f0
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import org.opendaylight.controller.configuration.ConfigurationObject;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * OpenStack Neutron v2.0 Load Balancer as a service
+ * (LBaaS) bindings. See OpenStack Network API
+ * v2.0 Reference for a description of the fields.
+ * Implemented fields are as follows:
+ *
+ * id                 uuid-str
+ * tenant_id          uuid-str
+ * name               String
+ * description        String
+ * status             String
+ * vip_address        IP address
+ * vip_subnet_id      uuid-str
+ * http://docs.openstack.org/api/openstack-network/2.0/openstack-network.pdf
+ */
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancer extends ConfigurationObject implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    @XmlElement(name="id")
+    String loadBalancerID;
+
+    @XmlElement (name="tenant_id")
+    String loadBalancerTenantID;
+
+    @XmlElement (name="name")
+    String loadBalancerName;
+
+    @XmlElement (name="description")
+    String loadBalancerDescription;
+
+    @XmlElement (name="status")
+    String loadBalancerStatus;
+
+    @XmlElement (name="vip_address")
+    String loadBalancerVipAddress;
+
+    @XmlElement (name="vip_subnet_id")
+    String loadBalancerVipSubnetID;
+
+    public String getLoadBalancerID() {
+        return loadBalancerID;
+    }
+
+    public void setLoadBalancerID(String loadBalancerID) {
+        this.loadBalancerID = loadBalancerID;
+    }
+
+    public String getLoadBalancerTenantID() {
+        return loadBalancerTenantID;
+    }
+
+    public void setLoadBalancerTenantID(String loadBalancerTenantID) {
+        this.loadBalancerTenantID = loadBalancerTenantID;
+    }
+
+    public String getLoadBalancerName() {
+        return loadBalancerName;
+    }
+
+    public void setLoadBalancerName(String loadBalancerName) {
+        this.loadBalancerName = loadBalancerName;
+    }
+
+    public String getLoadBalancerDescription() {
+        return loadBalancerDescription;
+    }
+
+    public void setLoadBalancerDescription(String loadBalancerDescription) {
+        this.loadBalancerDescription = loadBalancerDescription;
+    }
+
+    public String getLoadBalancerStatus() {
+        return loadBalancerStatus;
+    }
+
+    public void setLoadBalancerStatus(String loadBalancerStatus) {
+        this.loadBalancerStatus = loadBalancerStatus;
+    }
+
+    public String getLoadBalancerVipAddress() {
+        return loadBalancerVipAddress;
+    }
+
+    public void setLoadBalancerVipAddress(String loadBalancerVipAddress) {
+        this.loadBalancerVipAddress = loadBalancerVipAddress;
+    }
+
+    public String getLoadBalancerVipSubnetID() {
+        return loadBalancerVipSubnetID;
+    }
+
+    public void setLoadBalancerVipSubnetID(String loadBalancerVipSubnetID) {
+        this.loadBalancerVipSubnetID = loadBalancerVipSubnetID;
+    }
+
+    public NeutronLoadBalancer extractFields(List<String> fields) {
+        NeutronLoadBalancer ans = new NeutronLoadBalancer();
+        Iterator<String> i = fields.iterator();
+        while (i.hasNext()) {
+            String s = i.next();
+            if (s.equals("id")) {
+                ans.setLoadBalancerID(this.getLoadBalancerID());
+            }
+            if (s.equals("tenant_id")) {
+                ans.setLoadBalancerTenantID(this.getLoadBalancerTenantID());
+            }
+            if (s.equals("name")) {
+                ans.setLoadBalancerName(this.getLoadBalancerName());
+            }
+            if(s.equals("description")) {
+                ans.setLoadBalancerDescription(this.getLoadBalancerDescription());
+            }
+            if (s.equals("vip_address")) {
+                ans.setLoadBalancerVipAddress(this.getLoadBalancerVipAddress());
+            }
+            if (s.equals("vip_subnet_id")) {
+                ans.setLoadBalancerVipSubnetID(this.getLoadBalancerVipSubnetID());
+            }
+            if (s.equals("status")) {
+                ans.setLoadBalancerStatus(this.getLoadBalancerStatus());
+            }
+        }
+        return ans;
+    }
+
+    @Override public String toString() {
+        return "NeutronLoadBalancer{" +
+                "loadBalancerID='" + loadBalancerID + '\'' +
+                ", loadBalancerTenantID='" + loadBalancerTenantID + '\'' +
+                ", loadBalancerName='" + loadBalancerName + '\'' +
+                ", loadBalancerDescription='" + loadBalancerDescription + '\'' +
+                ", loadBalancerStatus='" + loadBalancerStatus + '\'' +
+                ", loadBalancerVipAddress='" + loadBalancerVipAddress + '\'' +
+                ", loadBalancerVipSubnetID='" + loadBalancerVipSubnetID + '\'' +
+                '}';
+    }
+}
\ No newline at end of file
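
extractFields() supports the northbound ?fields= query by copying only the requested attributes into a fresh object. A small illustrative driver, with made-up values:

    import java.util.Arrays;
    import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancer;

    public class LoadBalancerFieldsExample {
        public static void main(String[] args) {
            NeutronLoadBalancer lb = new NeutronLoadBalancer();
            lb.setLoadBalancerID("lb-1");
            lb.setLoadBalancerName("web-frontend");
            lb.setLoadBalancerVipAddress("10.0.0.5");

            // Mirrors a ?fields=id&fields=name request: only the named fields are copied over.
            NeutronLoadBalancer projected = lb.extractFields(Arrays.asList("id", "name"));
            System.out.println(projected);   // the VIP address stays null in the projection
        }
    }
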
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerHealthMonitor.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerHealthMonitor.java
new file mode 100644 (file)
index 0000000..0e9e1af
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import org.opendaylight.controller.configuration.ConfigurationObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * OpenStack Neutron v2.0 Load Balancer as a service
+ * (LBaaS) bindings. See OpenStack Network API
+ * v2.0 Reference for a description of the fields.
+ * Implemented fields are as follows:
+ *
+ *
+ * id                 uuid-str
+ * tenant_id          uuid-str
+ * type               String
+ * delay              Integer
+ * timeout            Integer
+ * max_retries        Integer
+ * http_method        String
+ * url_path           String
+ * expected_codes     String
+ * admin_state_up     Boolean
+ * status             String
+ * http://docs.openstack.org/api/openstack-network/2.0/openstack-network.pdf
+ */
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerHealthMonitor extends ConfigurationObject implements Serializable {
+    private static final long serialVersionUID = 1L;
+    private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancerHealthMonitor.class);
+
+    @XmlElement(name="id")
+    String loadBalancerHealthMonitorID;
+
+    @XmlElement (name="tenant_id")
+    String loadBalancerHealthMonitorTenantID;
+
+    @XmlElement (name="type")
+    String loadBalancerHealthMonitorType;
+
+    @XmlElement (name="delay")
+    Integer loadBalancerHealthMonitorDelay;
+
+    @XmlElement (name="timeout")
+    Integer loadBalancerHealthMonitorTimeout;
+
+    @XmlElement (name="max_retries")
+    Integer loadBalancerHealthMonitorMaxRetries;
+
+    @XmlElement (name="http_method")
+    String loadBalancerHealthMonitorHttpMethod;
+
+    @XmlElement (name="url_path")
+    String loadBalancerHealthMonitorUrlPath;
+
+    @XmlElement (name="expected_codes")
+    String loadBalancerHealthMonitorExpectedCodes;
+
+    @XmlElement (defaultValue="true", name="admin_state_up")
+    Boolean loadBalancerHealthMonitorAdminStateIsUp;
+
+    @XmlElement (name="status")
+    String loadBalancerHealthMonitorStatus;
+
+    public String getLoadBalancerHealthMonitorID() {
+        return loadBalancerHealthMonitorID;
+    }
+
+    public void setLoadBalancerHealthMonitorID(String loadBalancerHealthMonitorID) {
+        this.loadBalancerHealthMonitorID = loadBalancerHealthMonitorID;
+    }
+
+    public String getLoadBalancerHealthMonitorTenantID() {
+        return loadBalancerHealthMonitorTenantID;
+    }
+
+    public void setLoadBalancerHealthMonitorTenantID(String loadBalancerHealthMonitorTenantID) {
+        this.loadBalancerHealthMonitorTenantID = loadBalancerHealthMonitorTenantID;
+    }
+
+    public String getLoadBalancerHealthMonitorType() {
+        return loadBalancerHealthMonitorType;
+    }
+
+    public void setLoadBalancerHealthMonitorType(String loadBalancerHealthMonitorType) {
+        this.loadBalancerHealthMonitorType = loadBalancerHealthMonitorType;
+    }
+
+    public Integer getLoadBalancerHealthMonitorDelay() {
+        return loadBalancerHealthMonitorDelay;
+    }
+
+    public void setLoadBalancerHealthMonitorDelay(Integer loadBalancerHealthMonitorDelay) {
+        this.loadBalancerHealthMonitorDelay = loadBalancerHealthMonitorDelay;
+    }
+
+    public Integer getLoadBalancerHealthMonitorTimeout() {
+        return loadBalancerHealthMonitorTimeout;
+    }
+
+    public void setLoadBalancerHealthMonitorTimeout(Integer loadBalancerHealthMonitorTimeout) {
+        this.loadBalancerHealthMonitorTimeout = loadBalancerHealthMonitorTimeout;
+    }
+
+    public Integer getLoadBalancerHealthMonitorMaxRetries() {
+        return loadBalancerHealthMonitorMaxRetries;
+    }
+
+    public void setLoadBalancerHealthMonitorMaxRetries(Integer loadBalancerHealthMonitorMaxRetries) {
+        this.loadBalancerHealthMonitorMaxRetries = loadBalancerHealthMonitorMaxRetries;
+    }
+
+    public String getLoadBalancerHealthMonitorHttpMethod() {
+        return loadBalancerHealthMonitorHttpMethod;
+    }
+
+    public void setLoadBalancerHealthMonitorHttpMethod(String loadBalancerHealthMonitorHttpMethod) {
+        this.loadBalancerHealthMonitorHttpMethod = loadBalancerHealthMonitorHttpMethod;
+    }
+
+    public String getLoadBalancerHealthMonitorUrlPath() {
+        return loadBalancerHealthMonitorUrlPath;
+    }
+
+    public void setLoadBalancerHealthMonitorUrlPath(String loadBalancerHealthMonitorUrlPath) {
+        this.loadBalancerHealthMonitorUrlPath = loadBalancerHealthMonitorUrlPath;
+    }
+
+    public String getLoadBalancerHealthMonitorExpectedCodes() {
+        return loadBalancerHealthMonitorExpectedCodes;
+    }
+
+    public void setLoadBalancerHealthMonitorExpectedCodes(String loadBalancerHealthMonitorExpectedCodes) {
+        this.loadBalancerHealthMonitorExpectedCodes = loadBalancerHealthMonitorExpectedCodes;
+    }
+
+    public Boolean getLoadBalancerHealthMonitorAdminStateIsUp() {
+        return loadBalancerHealthMonitorAdminStateIsUp;
+    }
+
+    public void setLoadBalancerHealthMonitorAdminStateIsUp(Boolean loadBalancerHealthMonitorAdminStateIsUp) {
+        this.loadBalancerHealthMonitorAdminStateIsUp = loadBalancerHealthMonitorAdminStateIsUp;
+    }
+
+    public String getLoadBalancerHealthMonitorStatus() {
+        return loadBalancerHealthMonitorStatus;
+    }
+
+    public void setLoadBalancerHealthMonitorStatus(String loadBalancerHealthMonitorStatus) {
+        this.loadBalancerHealthMonitorStatus = loadBalancerHealthMonitorStatus;
+    }
+
+    public NeutronLoadBalancerHealthMonitor extractFields(List<String> fields) {
+        NeutronLoadBalancerHealthMonitor ans = new NeutronLoadBalancerHealthMonitor();
+        Iterator<String> i = fields.iterator();
+        while (i.hasNext()) {
+            String s = i.next();
+            if (s.equals("id")) {
+                ans.setLoadBalancerHealthMonitorID(this.getLoadBalancerHealthMonitorID());
+            }
+            if (s.equals("tenant_id")) {
+                ans.setLoadBalancerHealthMonitorTenantID(this.getLoadBalancerHealthMonitorTenantID());
+            }
+            if (s.equals("type")) {
+                ans.setLoadBalancerHealthMonitorType(this.getLoadBalancerHealthMonitorType());
+            }
+            if (s.equals("delay")) {
+                ans.setLoadBalancerHealthMonitorDelay(this.getLoadBalancerHealthMonitorDelay());
+            }
+            if (s.equals("timeout")) {
+                ans.setLoadBalancerHealthMonitorTimeout(this.getLoadBalancerHealthMonitorTimeout());
+            }
+            if (s.equals("max_retries")) {
+                ans.setLoadBalancerHealthMonitorMaxRetries(this.getLoadBalancerHealthMonitorMaxRetries());
+            }
+            if (s.equals("http_method")) {
+                ans.setLoadBalancerHealthMonitorHttpMethod(this.getLoadBalancerHealthMonitorHttpMethod());
+            }
+            if(s.equals("url_path")) {
+                ans.setLoadBalancerHealthMonitorUrlPath(this.getLoadBalancerHealthMonitorUrlPath());
+            }
+            if (s.equals("expected_codes")) {
+                ans.setLoadBalancerHealthMonitorExpectedCodes(this.getLoadBalancerHealthMonitorExpectedCodes());
+            }
+            if (s.equals("admin_state_up")) {
+                ans.setLoadBalancerHealthMonitorAdminStateIsUp(loadBalancerHealthMonitorAdminStateIsUp);
+            }
+            if (s.equals("status")) {
+                ans.setLoadBalancerHealthMonitorStatus(this.getLoadBalancerHealthMonitorStatus());
+            }
+        }
+        return ans;
+    }
+
+    @Override public String toString() {
+        return "NeutronLoadBalancerHealthMonitor{" +
+                "loadBalancerHealthMonitorID='" + loadBalancerHealthMonitorID + '\'' +
+                ", loadBalancerHealthMonitorTenantID='" + loadBalancerHealthMonitorTenantID + '\'' +
+                ", loadBalancerHealthMonitorType='" + loadBalancerHealthMonitorType + '\'' +
+                ", loadBalancerHealthMonitorDelay=" + loadBalancerHealthMonitorDelay +
+                ", loadBalancerHealthMonitorTimeout=" + loadBalancerHealthMonitorTimeout +
+                ", loadBalancerHealthMonitorMaxRetries=" + loadBalancerHealthMonitorMaxRetries +
+                ", loadBalancerHealthMonitorHttpMethod='" + loadBalancerHealthMonitorHttpMethod + '\'' +
+                ", loadBalancerHealthMonitorUrlPath='" + loadBalancerHealthMonitorUrlPath + '\'' +
+                ", loadBalancerHealthMonitorExpectedCodes='" + loadBalancerHealthMonitorExpectedCodes + '\'' +
+                ", loadBalancerHealthMonitorAdminStateIsUp=" + loadBalancerHealthMonitorAdminStateIsUp +
+                ", loadBalancerHealthMonitorStatus='" + loadBalancerHealthMonitorStatus + '\'' +
+                '}';
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerListener.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerListener.java
new file mode 100644 (file)
index 0000000..3989709
--- /dev/null
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import org.opendaylight.controller.configuration.ConfigurationObject;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * OpenStack Neutron v2.0 Load Balancer as a service
+ * (LBaaS) bindings. See OpenStack Network API
+ * v2.0 Reference for a description of the fields.
+ * Implemented fields are as follows:
+ *
+ * id                 uuid-str
+ * default_pool_id    String
+ * tenant_id          uuid-str
+ * name               String
+ * description        String
+ * shared             Bool
+ * protocol           String
+ * protocol_port      String
+ * load_balancer_id   String
+ * admin_state_up     Boolean
+ * status             String
+ *
+ * http://docs.openstack.org/api/openstack-network/2.0/openstack-network.pdf
+ */
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerListener extends ConfigurationObject implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    @XmlElement(name="id")
+    String loadBalancerListenerID;
+
+    @XmlElement (name="default_pool_id")
+    String neutronLoadBalancerListenerDefaultPoolID;
+
+    @XmlElement (name="tenant_id")
+    String loadBalancerListenerTenantID;
+
+    @XmlElement (name="name")
+    String loadBalancerListenerName;
+
+    @XmlElement (name="description")
+    String loadBalancerListenerDescription;
+
+    @XmlElement (defaultValue="true", name="admin_state_up")
+    Boolean loadBalancerListenerAdminStateIsUp;
+
+    @XmlElement (name="status")
+    String loadBalancerListenerStatus;
+
+    @XmlElement (defaultValue="false", name="shared")
+    Boolean loadBalancerListenerIsShared;
+
+    @XmlElement (name="protocol")
+    String neutronLoadBalancerListenerProtocol;
+
+    @XmlElement (name="protocol_port")
+    String neutronLoadBalancerListenerProtocolPort;
+
+    @XmlElement (name="load_balancer_id")
+    String neutronLoadBalancerListenerLoadBalancerID;
+
+
+    public String getLoadBalancerListenerID() {
+        return loadBalancerListenerID;
+    }
+
+    public void setLoadBalancerListenerID(String loadBalancerListenerID) {
+        this.loadBalancerListenerID = loadBalancerListenerID;
+    }
+
+    public String getLoadBalancerListenerTenantID() {
+        return loadBalancerListenerTenantID;
+    }
+
+    public void setLoadBalancerListenerTenantID(String loadBalancerListenerTenantID) {
+        this.loadBalancerListenerTenantID = loadBalancerListenerTenantID;
+    }
+
+    public String getLoadBalancerListenerName() {
+        return loadBalancerListenerName;
+    }
+
+    public void setLoadBalancerListenerName(String loadBalancerListenerName) {
+        this.loadBalancerListenerName = loadBalancerListenerName;
+    }
+
+    public String getLoadBalancerListenerDescription() {
+        return loadBalancerListenerDescription;
+    }
+
+    public void setLoadBalancerListenerDescription(String loadBalancerListenerDescription) {
+        this.loadBalancerListenerDescription = loadBalancerListenerDescription;
+    }
+
+    public Boolean getLoadBalancerListenerAdminStateIsUp() {
+        return loadBalancerListenerAdminStateIsUp;
+    }
+
+    public void setLoadBalancerListenerAdminStateIsUp(Boolean loadBalancerListenerAdminStateIsUp) {
+        this.loadBalancerListenerAdminStateIsUp = loadBalancerListenerAdminStateIsUp;
+    }
+
+    public String getLoadBalancerListenerStatus() {
+        return loadBalancerListenerStatus;
+    }
+
+    public void setLoadBalancerListenerStatus(String loadBalancerListenerStatus) {
+        this.loadBalancerListenerStatus = loadBalancerListenerStatus;
+    }
+
+    public Boolean getLoadBalancerListenerIsShared() {
+        return loadBalancerListenerIsShared;
+    }
+
+    public void setLoadBalancerListenerIsShared(Boolean loadBalancerListenerIsShared) {
+        this.loadBalancerListenerIsShared = loadBalancerListenerIsShared;
+    }
+
+    public String getNeutronLoadBalancerListenerProtocol() {
+        return neutronLoadBalancerListenerProtocol;
+    }
+
+    public void setNeutronLoadBalancerListenerProtocol(String neutronLoadBalancerListenerProtocol) {
+        this.neutronLoadBalancerListenerProtocol = neutronLoadBalancerListenerProtocol;
+    }
+
+    public String getNeutronLoadBalancerListenerProtocolPort() {
+        return neutronLoadBalancerListenerProtocolPort;
+    }
+
+    public void setNeutronLoadBalancerListenerProtocolPort(String neutronLoadBalancerListenerProtocolPort) {
+        this.neutronLoadBalancerListenerProtocolPort = neutronLoadBalancerListenerProtocolPort;
+    }
+
+    public String getNeutronLoadBalancerListenerDefaultPoolID() {
+        return neutronLoadBalancerListenerDefaultPoolID;
+    }
+
+    public void setNeutronLoadBalancerListenerDefaultPoolID(String neutronLoadBalancerListenerDefaultPoolID) {
+        this.neutronLoadBalancerListenerDefaultPoolID = neutronLoadBalancerListenerDefaultPoolID;
+    }
+
+    public String getNeutronLoadBalancerListenerLoadBalancerID() {
+        return neutronLoadBalancerListenerLoadBalancerID;
+    }
+
+    public void setNeutronLoadBalancerListenerLoadBalancerID(String neutronLoadBalancerListenerLoadBalancerID) {
+        this.neutronLoadBalancerListenerLoadBalancerID = neutronLoadBalancerListenerLoadBalancerID;
+    }
+
+    public NeutronLoadBalancerListener extractFields(List<String> fields) {
+        NeutronLoadBalancerListener ans = new NeutronLoadBalancerListener();
+        Iterator<String> i = fields.iterator();
+        while (i.hasNext()) {
+            String s = i.next();
+            if (s.equals("id")) {
+                ans.setLoadBalancerListenerID(this.getLoadBalancerListenerID());
+            }
+            if(s.equals("default_pool_id")) {
+                ans.setNeutronLoadBalancerListenerDefaultPoolID(this.getNeutronLoadBalancerListenerDefaultPoolID());
+            }
+            if (s.equals("tenant_id")) {
+                ans.setLoadBalancerListenerTenantID(this.getLoadBalancerListenerTenantID());
+            }
+            if (s.equals("name")) {
+                ans.setLoadBalancerListenerName(this.getLoadBalancerListenerName());
+            }
+            if(s.equals("description")) {
+                ans.setLoadBalancerListenerDescription(this.getLoadBalancerListenerDescription());
+            }
+            if (s.equals("shared")) {
+                ans.setLoadBalancerListenerIsShared(loadBalancerListenerIsShared);
+            }
+            if (s.equals("protocol")) {
+                ans.setNeutronLoadBalancerListenerProtocol(this.getNeutronLoadBalancerListenerProtocol());
+            }
+            if (s.equals("protocol_port")) {
+                ans.setNeutronLoadBalancerListenerProtocolPort(this.getNeutronLoadBalancerListenerProtocolPort());
+            }
+            if (s.equals("load_balancer_id")) {
+                ans.setNeutronLoadBalancerListenerLoadBalancerID(this.getNeutronLoadBalancerListenerLoadBalancerID());
+            }
+            if (s.equals("admin_state_up")) {
+                ans.setLoadBalancerListenerAdminStateIsUp(loadBalancerListenerAdminStateIsUp);
+            }
+            if (s.equals("status")) {
+                ans.setLoadBalancerListenerStatus(this.getLoadBalancerListenerStatus());
+            }
+        }
+        return ans;
+    }
+
+    @Override public String toString() {
+        return "NeutronLoadBalancerListener{" +
+                "loadBalancerListenerID='" + loadBalancerListenerID + '\'' +
+                ", neutronLoadBalancerListenerDefaultPoolID='" + neutronLoadBalancerListenerDefaultPoolID + '\'' +
+                ", loadBalancerListenerTenantID='" + loadBalancerListenerTenantID + '\'' +
+                ", loadBalancerListenerName='" + loadBalancerListenerName + '\'' +
+                ", loadBalancerListenerDescription='" + loadBalancerListenerDescription + '\'' +
+                ", loadBalancerListenerAdminStateIsUp=" + loadBalancerListenerAdminStateIsUp +
+                ", loadBalancerListenerStatus='" + loadBalancerListenerStatus + '\'' +
+                ", loadBalancerListenerIsShared=" + loadBalancerListenerIsShared +
+                ", neutronLoadBalancerListenerProtocol='" + neutronLoadBalancerListenerProtocol + '\'' +
+                ", neutronLoadBalancerListenerProtocolPort='" + neutronLoadBalancerListenerProtocolPort + '\'' +
+                ", neutronLoadBalancerListenerLoadBalancerID='" + neutronLoadBalancerListenerLoadBalancerID + '\'' +
+                '}';
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPool.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPool.java
new file mode 100644 (file)
index 0000000..12c80fe
--- /dev/null
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import org.opendaylight.controller.configuration.ConfigurationObject;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * OpenStack Neutron v2.0 Load Balancer as a service
+ * (LBaaS) bindings. See OpenStack Network API
+ * v2.0 Reference for a description of the fields.
+ * Implemented fields are as follows:
+ *
+ * id                 uuid-str
+ * tenant_id          uuid-str
+ * name               String
+ * description        String
+ * protocol           String
+ * lb_algorithm       String
+ * healthmonitor_id   String
+ * admin_state_up     Bool
+ * status             String
+ * members            List <String>
+ * http://docs.openstack.org/api/openstack-network/2.0/openstack-network.pdf
+ */
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerPool extends ConfigurationObject implements Serializable {
+    private static final long serialVersionUID = 1L;
+
+    @XmlElement(name="id")
+    String loadBalancerPoolID;
+
+    @XmlElement (name="tenant_id")
+    String loadBalancerPoolTenantID;
+
+    @XmlElement (name="name")
+    String loadBalancerPoolName;
+
+    @XmlElement (name="description")
+    String loadBalancerPoolDescription;
+
+    @XmlElement (name="protocol")
+    String loadBalancerPoolProtocol;
+
+    @XmlElement (name="lb_algorithm")
+    String loadBalancerPoolLbAlgorithm;
+
+    @XmlElement (name="healthmonitor_id")
+    String neutronLoadBalancerPoolHealthMonitorID;
+
+    @XmlElement (defaultValue="true", name="admin_state_up")
+    Boolean loadBalancerPoolAdminStateIsUp;
+
+    @XmlElement (name="status")
+    String loadBalancerPoolStatus;
+
+    @XmlElement (name="members")
+    List loadBalancerPoolMembers;
+
+    HashMap<String, NeutronLoadBalancerPoolMember> member;
+
+    public NeutronLoadBalancerPool() {
+        member = new HashMap<String, NeutronLoadBalancerPoolMember>();
+    }
+
+    public String getLoadBalancerPoolID() {
+        return loadBalancerPoolID;
+    }
+
+    public void setLoadBalancerPoolID(String loadBalancerPoolID) {
+        this.loadBalancerPoolID = loadBalancerPoolID;
+    }
+
+    public String getLoadBalancerPoolTenantID() {
+        return loadBalancerPoolTenantID;
+    }
+
+    public void setLoadBalancerPoolTenantID(String loadBalancerPoolTenantID) {
+        this.loadBalancerPoolTenantID = loadBalancerPoolTenantID;
+    }
+
+    public String getLoadBalancerPoolName() {
+        return loadBalancerPoolName;
+    }
+
+    public void setLoadBalancerPoolName(String loadBalancerPoolName) {
+        this.loadBalancerPoolName = loadBalancerPoolName;
+    }
+
+    public String getLoadBalancerPoolDescription() {
+        return loadBalancerPoolDescription;
+    }
+
+    public void setLoadBalancerPoolDescription(String loadBalancerPoolDescription) {
+        this.loadBalancerPoolDescription = loadBalancerPoolDescription;
+    }
+
+    public String getLoadBalancerPoolProtocol() {
+        return loadBalancerPoolProtocol;
+    }
+
+    public void setLoadBalancerPoolProtocol(String loadBalancerPoolProtocol) {
+        this.loadBalancerPoolProtocol = loadBalancerPoolProtocol;
+    }
+
+    public String getLoadBalancerPoolLbAlgorithm() {
+        return loadBalancerPoolLbAlgorithm;
+    }
+
+    public void setLoadBalancerPoolLbAlgorithm(String loadBalancerPoolLbAlgorithm) {
+        this.loadBalancerPoolLbAlgorithm = loadBalancerPoolLbAlgorithm;
+    }
+
+    public String getNeutronLoadBalancerPoolHealthMonitorID() {
+        return neutronLoadBalancerPoolHealthMonitorID;
+    }
+
+    public void setNeutronLoadBalancerPoolHealthMonitorID(String neutronLoadBalancerPoolHealthMonitorID) {
+        this.neutronLoadBalancerPoolHealthMonitorID = neutronLoadBalancerPoolHealthMonitorID;
+    }
+
+    public Boolean getLoadBalancerPoolAdminIsStateIsUp() {
+        return loadBalancerPoolAdminStateIsUp;
+    }
+
+    public void setLoadBalancerPoolAdminStateIsUp(Boolean loadBalancerPoolAdminStateIsUp) {
+        this.loadBalancerPoolAdminStateIsUp = loadBalancerPoolAdminStateIsUp;
+    }
+
+    public String getLoadBalancerPoolStatus() {
+        return loadBalancerPoolStatus;
+    }
+
+    public void setLoadBalancerPoolStatus(String loadBalancerPoolStatus) {
+        this.loadBalancerPoolStatus = loadBalancerPoolStatus;
+    }
+
+    public List getLoadBalancerPoolMembers() {
+        return loadBalancerPoolMembers;
+    }
+
+    public void setLoadBalancerPoolMembers(List loadBalancerPoolMembers) {
+        this.loadBalancerPoolMembers = loadBalancerPoolMembers;
+    }
+
+    public NeutronLoadBalancerPool extractFields(List<String> fields) {
+        NeutronLoadBalancerPool ans = new NeutronLoadBalancerPool();
+        Iterator<String> i = fields.iterator();
+        while (i.hasNext()) {
+            String s = i.next();
+            if (s.equals("id")) {
+                ans.setLoadBalancerPoolID(this.getLoadBalancerPoolID());
+            }
+            if (s.equals("tenant_id")) {
+                ans.setLoadBalancerPoolTenantID(this.getLoadBalancerPoolTenantID());
+            }
+            if (s.equals("name")) {
+                ans.setLoadBalancerPoolName(this.getLoadBalancerPoolName());
+            }
+            if(s.equals("description")) {
+                ans.setLoadBalancerPoolDescription(this.getLoadBalancerPoolDescription());
+            }
+            if(s.equals("protocol")) {
+                ans.setLoadBalancerPoolProtocol(this.getLoadBalancerPoolProtocol());
+            }
+            if(s.equals("lb_algorithm")) {
+                ans.setLoadBalancerPoolLbAlgorithm(this.getLoadBalancerPoolLbAlgorithm());
+            }
+            if(s.equals("healthmonitor_id")) {
+                ans.setNeutronLoadBalancerPoolHealthMonitorID(this.getNeutronLoadBalancerPoolHealthMonitorID());
+            }
+            if (s.equals("admin_state_up")) {
+                ans.setLoadBalancerPoolAdminStateIsUp(loadBalancerPoolAdminStateIsUp);
+            }
+            if (s.equals("status")) {
+                ans.setLoadBalancerPoolStatus(this.getLoadBalancerPoolStatus());
+            }
+            if (s.equals("members")) {
+                ans.setLoadBalancerPoolMembers(getLoadBalancerPoolMembers());
+            }
+        }
+        return ans;
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPoolMember.java b/opendaylight/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/NeutronLoadBalancerPoolMember.java
new file mode 100644 (file)
index 0000000..577c3bb
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron;
+
+import org.opendaylight.controller.configuration.ConfigurationObject;
+
+import javax.xml.bind.annotation.XmlElement;
+import java.io.Serializable;
+import java.util.Iterator;
+import java.util.List;
+
+public class NeutronLoadBalancerPoolMember  extends ConfigurationObject implements Serializable {
+
+    private static final long serialVersionUID = 1L;
+
+    /**
+     * TODO: Plumb into LBaaS Pool. Members are nested underneath Pool CRUD.
+     */
+    @XmlElement (name="id")
+    String poolMemberID;
+
+    @XmlElement (name="tenant_id")
+    String poolMemberTenantID;
+
+    @XmlElement (name="address")
+    String poolMemberAddress;
+
+    @XmlElement (name="protocol_port")
+    Integer poolMemberProtoPort;
+
+    @XmlElement (name="admin_state_up")
+    Boolean poolMemberAdminStateIsUp;
+
+    @XmlElement (name="weight")
+    Integer poolMemberWeight;
+
+    @XmlElement (name="subnet_id")
+    String poolMemberSubnetID;
+
+    @XmlElement (name="status")
+    String poolMemberStatus;
+
+    public NeutronLoadBalancerPoolMember() {
+    }
+
+    public String getPoolMemberID() {
+        return poolMemberID;
+    }
+
+    public void setPoolMemberID(String poolMemberID) {
+        this.poolMemberID = poolMemberID;
+    }
+
+    public String getPoolMemberTenantID() {
+        return poolMemberTenantID;
+    }
+
+    public void setPoolMemberTenantID(String poolMemberTenantID) {
+        this.poolMemberTenantID = poolMemberTenantID;
+    }
+
+    public String getPoolMemberAddress() {
+        return poolMemberAddress;
+    }
+
+    public void setPoolMemberAddress(String poolMemberAddress) {
+        this.poolMemberAddress = poolMemberAddress;
+    }
+
+    public Integer getPoolMemberProtoPort() {
+        return poolMemberProtoPort;
+    }
+
+    public void setPoolMemberProtoPort(Integer poolMemberProtoPort) {
+        this.poolMemberProtoPort = poolMemberProtoPort;
+    }
+
+    public Boolean getPoolMemberAdminStateIsUp() {
+        return poolMemberAdminStateIsUp;
+    }
+
+    public void setPoolMemberAdminStateIsUp(Boolean poolMemberAdminStateIsUp) {
+        this.poolMemberAdminStateIsUp = poolMemberAdminStateIsUp;
+    }
+
+    public Integer getPoolMemberWeight() {
+        return poolMemberWeight;
+    }
+
+    public void setPoolMemberWeight(Integer poolMemberWeight) {
+        this.poolMemberWeight = poolMemberWeight;
+    }
+
+    public String getPoolMemberSubnetID() {
+        return poolMemberSubnetID;
+    }
+
+    public void setPoolMemberSubnetID(String poolMemberSubnetID) {
+        this.poolMemberSubnetID = poolMemberSubnetID;
+    }
+
+    public String getPoolMemberStatus() {
+        return poolMemberStatus;
+    }
+
+    public void setPoolMemberStatus(String poolMemberStatus) {
+        this.poolMemberStatus = poolMemberStatus;
+    }
+
+    public NeutronLoadBalancerPoolMember extractFields(List<String> fields) {
+        NeutronLoadBalancerPoolMember ans = new NeutronLoadBalancerPoolMember();
+        Iterator<String> i = fields.iterator();
+        while (i.hasNext()) {
+            String s = i.next();
+            if (s.equals("id")) {
+                ans.setPoolMemberID(this.getPoolMemberID());
+            }
+            if (s.equals("tenant_id")) {
+                ans.setPoolMemberTenantID(this.getPoolMemberTenantID());
+            }
+            if (s.equals("address")) {
+                ans.setPoolMemberAddress(this.getPoolMemberAddress());
+            }
+            if(s.equals("protocol_port")) {
+                ans.setPoolMemberProtoPort(this.getPoolMemberProtoPort());
+            }
+            if (s.equals("admin_state_up")) {
+                ans.setPoolMemberAdminStateIsUp(poolMemberAdminStateIsUp);
+            }
+            if(s.equals("weight")) {
+                ans.setPoolMemberWeight(this.getPoolMemberWeight());
+            }
+            if(s.equals("subnet_id")) {
+                ans.setPoolMemberSubnetID(this.getPoolMemberSubnetID());
+            }
+            if (s.equals("status")) {
+                ans.setPoolMemberStatus(this.getPoolMemberStatus());
+            }
+        }
+        return ans;
+    }
+    @Override public String toString() {
+        return "NeutronLoadBalancerPoolMember{" +
+                "poolMemberID='" + poolMemberID + '\'' +
+                ", poolMemberTenantID='" + poolMemberTenantID + '\'' +
+                ", poolMemberAddress='" + poolMemberAddress + '\'' +
+                ", poolMemberProtoPort=" + poolMemberProtoPort +
+                ", poolMemberAdminStateIsUp=" + poolMemberAdminStateIsUp +
+                ", poolMemberWeight=" + poolMemberWeight +
+                ", poolMemberSubnetID='" + poolMemberSubnetID + '\'' +
+                ", poolMemberStatus='" + poolMemberStatus + '\'' +
+                '}';
+    }
+}
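
The TODO at the top of this class notes that members are meant to be nested under the pool CRUD. As a hedged sketch only (the helper and its wiring are not part of this patch), attaching a member to its parent pool through the pool's raw member list could look like:

    import java.util.ArrayList;
    import java.util.List;
    import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
    import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;

    public class PoolMembershipExample {
        @SuppressWarnings({ "rawtypes", "unchecked" })
        public static void attach(NeutronLoadBalancerPool pool, NeutronLoadBalancerPoolMember member) {
            List members = pool.getLoadBalancerPoolMembers();   // declared as a raw List on the pool
            if (members == null) {
                members = new ArrayList();
            }
            members.add(member);
            pool.setLoadBalancerPoolMembers(members);
        }
    }
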
index b32b01cb3f3015930510d22fd1a39836a25a0cbb..803e5e8d41ff8af5d1e51ffc09abaa364ad214cc 100644 (file)
@@ -235,6 +235,11 @@ public class NeutronPort extends ConfigurationObject implements Serializable, IN
             if (s.equals("tenant_id")) {
                 ans.setTenantID(this.getTenantID());
             }
+            if (s.equals("security_groups")) {
+                List<NeutronSecurityGroup> securityGroups = new ArrayList<NeutronSecurityGroup>();
+                securityGroups.addAll(this.getSecurityGroups());
+                ans.setSecurityGroups(securityGroups);
+            }
         }
         return ans;
     }
diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/INeutronLoadBalancerPoolMemberRequest.java b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/INeutronLoadBalancerPoolMemberRequest.java
new file mode 100644 (file)
index 0000000..9d66163
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
+
+import javax.xml.bind.annotation.XmlElement;
+import java.util.List;
+
+public class INeutronLoadBalancerPoolMemberRequest {
+
+    /**
+     * See OpenStack Network API v2.0 Reference for a description of the fields:
+     * http://docs.openstack.org/api/openstack-network/2.0/content/
+     */
+
+    @XmlElement(name="member")
+    NeutronLoadBalancerPoolMember singletonLoadBalancerPoolMember;
+
+    @XmlElement(name="members")
+    List<NeutronLoadBalancerPoolMember> bulkRequest;
+
+    INeutronLoadBalancerPoolMemberRequest() {
+    }
+
+    INeutronLoadBalancerPoolMemberRequest(List<NeutronLoadBalancerPoolMember> bulk) {
+        bulkRequest = bulk;
+        singletonLoadBalancerPoolMember = null;
+    }
+
+    INeutronLoadBalancerPoolMemberRequest(NeutronLoadBalancerPoolMember group) {
+        singletonLoadBalancerPoolMember = group;
+    }
+
+    public List<NeutronLoadBalancerPoolMember> getBulk() {
+        return bulkRequest;
+    }
+
+    public NeutronLoadBalancerPoolMember getSingleton() {
+        return singletonLoadBalancerPoolMember;
+    }
+
+    public boolean isSingleton() {
+        return (singletonLoadBalancerPoolMember != null);
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerHealthMonitorNorthbound.java b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerHealthMonitorNorthbound.java
new file mode 100644 (file)
index 0000000..6cd2673
--- /dev/null
@@ -0,0 +1,410 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+
+import org.codehaus.enunciate.jaxrs.ResponseCode;
+import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerHealthMonitorAware;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerHealthMonitorCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancer;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerHealthMonitor;
+import org.opendaylight.controller.northbound.commons.RestMessages;
+import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
+import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
+import org.opendaylight.controller.sal.utils.ServiceHelper;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Neutron Northbound REST APIs for Load Balancer HealthMonitor.<br>
+ * This class provides REST APIs for managing neutron LoadBalancerHealthMonitor
+ *
+ * <br>
+ * <br>
+ * Authentication scheme : <b>HTTP Basic</b><br>
+ * Authentication realm : <b>opendaylight</b><br>
+ * Transport : <b>HTTP and HTTPS</b><br>
+ * <br>
+ * HTTPS Authentication is disabled by default. Administrator can enable it in
+ * tomcat-server.xml after adding a proper keystore / SSL certificate from a
+ * trusted authority.<br>
+ * More info :
+ * http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
+ *
+ */
+@Path("/healthmonitors")
+public class NeutronLoadBalancerHealthMonitorNorthbound {
+    private static final Logger logger = LoggerFactory.getLogger(NeutronLoadBalancerHealthMonitorNorthbound.class);
+
+    private NeutronLoadBalancerHealthMonitor extractFields(NeutronLoadBalancerHealthMonitor o, List<String> fields) {
+        return o.extractFields(fields);
+    }
+
+    /**
+     * Returns a list of all LoadBalancerHealthMonitors */
+    @GET
+    @Produces({ MediaType.APPLICATION_JSON })
+    @StatusCodes({
+            @ResponseCode(code = 200, condition = "Operation successful"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+
+    public Response listGroups(
+            // return fields
+            @QueryParam("fields") List<String> fields,
+            // OpenStack LoadBalancerHealthMonitor attributes
+            @QueryParam("id") String queryLoadBalancerHealthMonitorID,
+            @QueryParam("tenant_id") String queryLoadBalancerHealthMonitorTenantID,
+            // TODO "type" is being a property by the JSON parser.
+            @QueryParam("type") String queryLoadBalancerHealthMonitorType,
+            @QueryParam("delay") Integer queryLoadBalancerHealthMonitorDelay,
+            @QueryParam("timeout") Integer queryLoadBalancerHealthMonitorTimeout,
+            @QueryParam("max_retries") Integer queryLoadBalancerHealthMonitorMaxRetries,
+            @QueryParam("http_method") String queryLoadBalancerHealthMonitorHttpMethod,
+            @QueryParam("url_path") String queryLoadBalancerHealthMonitorUrlPath,
+            @QueryParam("expected_codes") String queryLoadBalancerHealthMonitorExpectedCodes,
+            @QueryParam("admin_state_up") Boolean queryLoadBalancerHealthMonitorIsAdminStateUp,
+            @QueryParam("status") String queryLoadBalancerHealthMonitorStatus,
+            // pagination
+            @QueryParam("limit") String limit,
+            @QueryParam("marker") String marker,
+            @QueryParam("page_reverse") String pageReverse
+            // sorting not supported
+    ) {
+        INeutronLoadBalancerHealthMonitorCRUD loadBalancerHealthMonitorInterface = NeutronCRUDInterfaces
+                .getINeutronLoadBalancerHealthMonitorCRUD(this);
+        if (loadBalancerHealthMonitorInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerHealthMonitor CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+        List<NeutronLoadBalancerHealthMonitor> allLoadBalancerHealthMonitors = loadBalancerHealthMonitorInterface.getAllNeutronLoadBalancerHealthMonitors();
+        List<NeutronLoadBalancerHealthMonitor> ans = new ArrayList<NeutronLoadBalancerHealthMonitor>();
+        Iterator<NeutronLoadBalancerHealthMonitor> i = allLoadBalancerHealthMonitors.iterator();
+        while (i.hasNext()) {
+            NeutronLoadBalancerHealthMonitor nsg = i.next();
+            if ((queryLoadBalancerHealthMonitorID == null ||
+                    queryLoadBalancerHealthMonitorID.equals(nsg.getLoadBalancerHealthMonitorID())) &&
+                    (queryLoadBalancerHealthMonitorTenantID == null ||
+                            queryLoadBalancerHealthMonitorTenantID.equals
+                                    (nsg.getLoadBalancerHealthMonitorTenantID())) &&
+                    (queryLoadBalancerHealthMonitorType == null ||
+                            queryLoadBalancerHealthMonitorType.equals
+                                    (nsg.getLoadBalancerHealthMonitorType())) &&
+                    (queryLoadBalancerHealthMonitorDelay == null ||
+                            queryLoadBalancerHealthMonitorDelay.equals
+                                    (nsg.getLoadBalancerHealthMonitorDelay())) &&
+                    (queryLoadBalancerHealthMonitorTimeout == null ||
+                            queryLoadBalancerHealthMonitorTimeout.equals
+                                    (nsg.getLoadBalancerHealthMonitorTimeout())) &&
+                    (queryLoadBalancerHealthMonitorMaxRetries == null ||
+                            queryLoadBalancerHealthMonitorMaxRetries.equals
+                                    (nsg.getLoadBalancerHealthMonitorMaxRetries())) &&
+                    (queryLoadBalancerHealthMonitorHttpMethod == null ||
+                            queryLoadBalancerHealthMonitorHttpMethod.equals
+                                    (nsg.getLoadBalancerHealthMonitorHttpMethod())) &&
+                    (queryLoadBalancerHealthMonitorUrlPath == null ||
+                            queryLoadBalancerHealthMonitorUrlPath.equals
+                                    (nsg.getLoadBalancerHealthMonitorUrlPath())) &&
+                    (queryLoadBalancerHealthMonitorExpectedCodes == null ||
+                            queryLoadBalancerHealthMonitorExpectedCodes.equals
+                                    (nsg.getLoadBalancerHealthMonitorExpectedCodes())) &&
+                    (queryLoadBalancerHealthMonitorIsAdminStateUp == null ||
+                            queryLoadBalancerHealthMonitorIsAdminStateUp.equals
+                                    (nsg.getLoadBalancerHealthMonitorAdminStateIsUp())) &&
+                    (queryLoadBalancerHealthMonitorStatus == null ||
+                            queryLoadBalancerHealthMonitorStatus.equals
+                                    (nsg.getLoadBalancerHealthMonitorStatus()))) {
+                if (fields.size() > 0) {
+                    ans.add(extractFields(nsg,fields));
+                } else {
+                    ans.add(nsg);
+                }
+            }
+        }
+        return Response.status(200).entity(
+                new NeutronLoadBalancerHealthMonitorRequest(ans)).build();
+    }
+
+    /**
+     * Returns a specific LoadBalancerHealthMonitor */
+
+    @Path("{loadBalancerHealthMonitorID}")
+    @GET
+    @Produces({ MediaType.APPLICATION_JSON })
+    @StatusCodes({
+            @ResponseCode(code = 200, condition = "Operation successful"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response showLoadBalancerHealthMonitor(@PathParam("loadBalancerHealthMonitorID") String loadBalancerHealthMonitorID,
+            // return fields
+            @QueryParam("fields") List<String> fields) {
+        INeutronLoadBalancerHealthMonitorCRUD loadBalancerHealthMonitorInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerHealthMonitorCRUD(this);
+        if (loadBalancerHealthMonitorInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerHealthMonitor CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+        if (!loadBalancerHealthMonitorInterface.neutronLoadBalancerHealthMonitorExists(loadBalancerHealthMonitorID)) {
+            throw new ResourceNotFoundException("LoadBalancerHealthMonitor UUID does not exist.");
+        }
+        if (fields.size() > 0) {
+            NeutronLoadBalancerHealthMonitor ans = loadBalancerHealthMonitorInterface.getNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID);
+            return Response.status(200).entity(
+                    new NeutronLoadBalancerHealthMonitorRequest(extractFields(ans, fields))).build();
+        } else {
+            return Response.status(200).entity(new NeutronLoadBalancerHealthMonitorRequest(loadBalancerHealthMonitorInterface.getNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID))).build();
+        }
+    }
+
+    /**
+     * Creates new LoadBalancerHealthMonitor */
+
+    @POST
+    @Produces({ MediaType.APPLICATION_JSON })
+    @Consumes({ MediaType.APPLICATION_JSON })
+    @StatusCodes({
+            @ResponseCode(code = 201, condition = "Created"),
+            @ResponseCode(code = 400, condition = "Bad Request"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 403, condition = "Forbidden"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 409, condition = "Conflict"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response createLoadBalancerHealthMonitors(final NeutronLoadBalancerHealthMonitorRequest input) {
+        INeutronLoadBalancerHealthMonitorCRUD loadBalancerHealthMonitorInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerHealthMonitorCRUD(this);
+        if (loadBalancerHealthMonitorInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerHealthMonitor CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+        if (input.isSingleton()) {
+            NeutronLoadBalancerHealthMonitor singleton = input.getSingleton();
+
+            /*
+             *  Verify that the LoadBalancerHealthMonitor doesn't already exist.
+             */
+            if (loadBalancerHealthMonitorInterface.neutronLoadBalancerHealthMonitorExists(singleton.getLoadBalancerHealthMonitorID())) {
+                throw new BadRequestException("LoadBalancerHealthMonitor UUID already exists");
+            }
+
+            Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerHealthMonitorAware.class, this, null);
+            if (instances != null) {
+                for (Object instance : instances) {
+                    INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+                    int status = service.canCreateNeutronLoadBalancerHealthMonitor(singleton);
+                    if (status < 200 || status > 299) {
+                        return Response.status(status).build();
+                    }
+                }
+            }
+            loadBalancerHealthMonitorInterface.addNeutronLoadBalancerHealthMonitor(singleton);
+            if (instances != null) {
+                for (Object instance : instances) {
+                    INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+                    service.neutronLoadBalancerHealthMonitorCreated(singleton);
+                }
+            }
+        } else {
+            List<NeutronLoadBalancerHealthMonitor> bulk = input.getBulk();
+            Iterator<NeutronLoadBalancerHealthMonitor> i = bulk.iterator();
+            HashMap<String, NeutronLoadBalancerHealthMonitor> testMap = new HashMap<String, NeutronLoadBalancerHealthMonitor>();
+            Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerHealthMonitorAware.class, this, null);
+            while (i.hasNext()) {
+                NeutronLoadBalancerHealthMonitor test = i.next();
+
+                /*
+                 *  Verify that the LoadBalancerHealthMonitor doesn't already exist
+                 */
+
+                if (loadBalancerHealthMonitorInterface
+                        .neutronLoadBalancerHealthMonitorExists(test.getLoadBalancerHealthMonitorID())) {
+                    throw new BadRequestException("LoadBalancerHealthMonitor UUID already is already created");
+                }
+                if (testMap.containsKey(test.getLoadBalancerHealthMonitorID())) {
+                    throw new BadRequestException("LoadBalancerHealthMonitor UUID already exists");
+                }
+                testMap.put(test.getLoadBalancerHealthMonitorID(), test);
+                if (instances != null) {
+                    for (Object instance : instances) {
+                        INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+                        int status = service.canCreateNeutronLoadBalancerHealthMonitor(test);
+                        if (status < 200 || status > 299) {
+                            return Response.status(status).build();
+                        }
+                    }
+                }
+            }
+            /*
+             * now, each element of the bulk request can be added to the cache
+             */
+            i = bulk.iterator();
+            while (i.hasNext()) {
+                NeutronLoadBalancerHealthMonitor test = i.next();
+                loadBalancerHealthMonitorInterface.addNeutronLoadBalancerHealthMonitor(test);
+                if (instances != null) {
+                    for (Object instance : instances) {
+                        INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+                        service.neutronLoadBalancerHealthMonitorCreated(test);
+                    }
+                }
+            }
+        }
+        return Response.status(201).entity(input).build();
+    }
+
+    /**
+     * Updates a LoadBalancerHealthMonitor
+     */
+    @Path("{loadBalancerHealthMonitorID}")
+    @PUT
+    @Produces({ MediaType.APPLICATION_JSON })
+    @Consumes({ MediaType.APPLICATION_JSON })
+    @StatusCodes({
+            @ResponseCode(code = 200, condition = "Operation successful"),
+            @ResponseCode(code = 400, condition = "Bad Request"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 403, condition = "Forbidden"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response updateLoadBalancerHealthMonitor(
+            @PathParam("loadBalancerHealthMonitorID") String loadBalancerHealthMonitorID,
+            final NeutronLoadBalancerHealthMonitorRequest input) {
+        INeutronLoadBalancerHealthMonitorCRUD loadBalancerHealthMonitorInterface = NeutronCRUDInterfaces
+                .getINeutronLoadBalancerHealthMonitorCRUD(this);
+        if (loadBalancerHealthMonitorInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerHealthMonitor CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+
+        /*
+         * verify the LoadBalancerHealthMonitor exists and there is only one delta provided
+         */
+        if (!loadBalancerHealthMonitorInterface.neutronLoadBalancerHealthMonitorExists(loadBalancerHealthMonitorID)) {
+            throw new ResourceNotFoundException("LoadBalancerHealthMonitor UUID does not exist.");
+        }
+        if (!input.isSingleton()) {
+            throw new BadRequestException("Only singleton edit supported");
+        }
+        NeutronLoadBalancerHealthMonitor delta = input.getSingleton();
+        NeutronLoadBalancerHealthMonitor original = loadBalancerHealthMonitorInterface
+                .getNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID);
+
+        /*
+         * updates restricted by Neutron
+         */
+        if (delta.getLoadBalancerHealthMonitorID() != null ||
+                delta.getLoadBalancerHealthMonitorTenantID() != null ||
+                delta.getLoadBalancerHealthMonitorType() != null ||
+                delta.getLoadBalancerHealthMonitorDelay() != null ||
+                delta.getLoadBalancerHealthMonitorTimeout() != null ||
+                delta.getLoadBalancerHealthMonitorMaxRetries() != null ||
+                delta.getLoadBalancerHealthMonitorHttpMethod() != null ||
+                delta.getLoadBalancerHealthMonitorUrlPath() != null ||
+                delta.getLoadBalancerHealthMonitorExpectedCodes() != null ||
+                delta.getLoadBalancerHealthMonitorAdminStateIsUp() != null ||
+                delta.getLoadBalancerHealthMonitorStatus() != null) {
+            throw new BadRequestException("Attribute edit blocked by Neutron");
+        }
+
+        Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerHealthMonitorAware.class, this, null);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+                int status = service.canUpdateNeutronLoadBalancerHealthMonitor(delta, original);
+                if (status < 200 || status > 299) {
+                    return Response.status(status).build();
+                }
+            }
+        }
+
+        /*
+         * update the object and return it
+         */
+        loadBalancerHealthMonitorInterface.updateNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID, delta);
+        NeutronLoadBalancerHealthMonitor updatedLoadBalancerHealthMonitor = loadBalancerHealthMonitorInterface
+                .getNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+                service.neutronLoadBalancerHealthMonitorUpdated(updatedLoadBalancerHealthMonitor);
+            }
+        }
+        return Response.status(200).entity(new NeutronLoadBalancerHealthMonitorRequest
+                (loadBalancerHealthMonitorInterface.getNeutronLoadBalancerHealthMonitor
+                        (loadBalancerHealthMonitorID))).build();
+    }
+
+
+
+    /**
+     * Deletes a LoadBalancerHealthMonitor
+     */
+    @Path("{loadBalancerHealthMonitorID}")
+    @DELETE
+    @StatusCodes({
+            @ResponseCode(code = 204, condition = "No Content"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 409, condition = "Conflict"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response deleteLoadBalancerHealthMonitor(
+            @PathParam("loadBalancerHealthMonitorID") String loadBalancerHealthMonitorID) {
+        INeutronLoadBalancerHealthMonitorCRUD loadBalancerHealthMonitorInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerHealthMonitorCRUD(this);
+        if (loadBalancerHealthMonitorInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerHealthMonitor CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+        /*
+         * verify the LoadBalancerHealthMonitor exists and it isn't currently in use
+         */
+        if (!loadBalancerHealthMonitorInterface.neutronLoadBalancerHealthMonitorExists(loadBalancerHealthMonitorID)) {
+            throw new ResourceNotFoundException("LoadBalancerHealthMonitor UUID does not exist.");
+        }
+        if (loadBalancerHealthMonitorInterface.neutronLoadBalancerHealthMonitorInUse(loadBalancerHealthMonitorID)) {
+            return Response.status(409).build();
+        }
+        NeutronLoadBalancerHealthMonitor singleton = loadBalancerHealthMonitorInterface.getNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID);
+        Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerHealthMonitorAware.class, this, null);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+                int status = service.canDeleteNeutronLoadBalancerHealthMonitor(singleton);
+                if (status < 200 || status > 299) {
+                    return Response.status(status).build();
+                }
+            }
+        }
+        loadBalancerHealthMonitorInterface.removeNeutronLoadBalancerHealthMonitor(loadBalancerHealthMonitorID);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerHealthMonitorAware service = (INeutronLoadBalancerHealthMonitorAware) instance;
+                service.neutronLoadBalancerHealthMonitorDeleted(singleton);
+            }
+        }
+        return Response.status(204).build();
+    }
+}
diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerHealthMonitorRequest.java b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerHealthMonitorRequest.java
new file mode 100644 (file)
index 0000000..dc4af2f
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerHealthMonitor;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.List;
+
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerHealthMonitorRequest {
+    /**
+     * See OpenStack Network API v2.0 Reference for a description of these attributes:
+     * http://docs.openstack.org/api/openstack-network/2.0/content/
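+     *
+     * Illustrative payload shapes (attribute names inside the objects are assumed
+     * from the Neutron LBaaS API, not defined by this class): a singleton request
+     * binds {"healthmonitor": {...}} to singletonLoadBalancerHealthMonitor and a
+     * bulk request binds {"healthmonitors": [{...}, ...]} to bulkRequest.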
+     */
+
+    @XmlElement(name="healthmonitor")
+    NeutronLoadBalancerHealthMonitor singletonLoadBalancerHealthMonitor;
+
+    @XmlElement(name="healthmonitors")
+    List<NeutronLoadBalancerHealthMonitor> bulkRequest;
+
+    NeutronLoadBalancerHealthMonitorRequest() {
+    }
+
+    NeutronLoadBalancerHealthMonitorRequest(List<NeutronLoadBalancerHealthMonitor> bulk) {
+        bulkRequest = bulk;
+        singletonLoadBalancerHealthMonitor = null;
+    }
+
+    NeutronLoadBalancerHealthMonitorRequest(NeutronLoadBalancerHealthMonitor group) {
+        singletonLoadBalancerHealthMonitor = group;
+    }
+
+    public List<NeutronLoadBalancerHealthMonitor> getBulk() {
+        return bulkRequest;
+    }
+
+    public NeutronLoadBalancerHealthMonitor getSingleton() {
+        return singletonLoadBalancerHealthMonitor;
+    }
+
+    public boolean isSingleton() {
+        return (singletonLoadBalancerHealthMonitor != null);
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerListenerNorthbound.java b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerListenerNorthbound.java
new file mode 100644 (file)
index 0000000..f3ef39f
--- /dev/null
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+
+import org.codehaus.enunciate.jaxrs.ResponseCode;
+import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerListenerAware;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerListenerCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerListener;
+import org.opendaylight.controller.northbound.commons.RestMessages;
+import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
+import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
+import org.opendaylight.controller.sal.utils.ServiceHelper;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Neutron Northbound REST APIs for LoadBalancerListeners.<br>
+ * This class provides REST APIs for managing neutron LoadBalancerListeners
+ *
+ * <br>
+ * <br>
+ * Authentication scheme : <b>HTTP Basic</b><br>
+ * Authentication realm : <b>opendaylight</b><br>
+ * Transport : <b>HTTP and HTTPS</b><br>
+ * <br>
+ * HTTPS Authentication is disabled by default. Administrator can enable it in
+ * tomcat-server.xml after adding a proper keystore / SSL certificate from a
+ * trusted authority.<br>
+ * More info :
+ * http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
+ *
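+ * <br>
+ * Example request (illustrative only; the URL prefix under which this resource is
+ * mounted and the attribute values shown are assumed, not defined by this class):<br>
+ * GET /listeners?protocol=HTTP returns the matching listeners as a JSON body of<br>
+ * the form {"listeners": [ {"id": "...", "protocol": "HTTP", ...} ]}<br>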
+ */
+@Path("/listeners")
+public class NeutronLoadBalancerListenerNorthbound {
+
+    private NeutronLoadBalancerListener extractFields(NeutronLoadBalancerListener o, List<String> fields) {
+        return o.extractFields(fields);
+    }
+
+    /**
+     * Returns a list of all LoadBalancerListeners */
+    @GET
+    @Produces({ MediaType.APPLICATION_JSON })
+    @StatusCodes({
+            @ResponseCode(code = 200, condition = "Operation successful"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+
+    public Response listGroups(
+            // return fields
+            @QueryParam("fields") List<String> fields,
+            // OpenStack LoadBalancerListener attributes
+            @QueryParam("id") String queryLoadBalancerListenerID,
+            @QueryParam("default_pool_id") String queryLoadBalancerListenerDefaultPoolID,
+            @QueryParam("tenant_id") String queryLoadBalancerListenerTenantID,
+            @QueryParam("name") String queryLoadBalancerListenerName,
+            @QueryParam("description") String queryLoadBalancerListenerDescription,
+            @QueryParam("shared") String queryLoadBalancerListenerIsShared,
+            @QueryParam("protocol") String queryLoadBalancerListenerProtocol,
+            @QueryParam("protocol_port") String queryLoadBalancerListenerProtocolPort,
+            @QueryParam("load_balancer_id") String queryLoadBalancerListenerLoadBalancerID,
+            @QueryParam("admin_state_up") String queryLoadBalancerListenerAdminIsUp,
+            @QueryParam("status") String queryLoadBalancerListenerStatus,
+            // pagination
+            @QueryParam("limit") String limit,
+            @QueryParam("marker") String marker,
+            @QueryParam("page_reverse") String pageReverse
+            // sorting not supported
+    ) {
+        INeutronLoadBalancerListenerCRUD loadBalancerListenerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerListenerCRUD(this);
+        //        INeutronLoadBalancerListenerRuleCRUD firewallRuleInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerListenerRuleCRUD(this);
+
+        if (loadBalancerListenerInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerListener CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+        List<NeutronLoadBalancerListener> allLoadBalancerListeners = loadBalancerListenerInterface.getAllNeutronLoadBalancerListeners();
+        //        List<NeutronLoadBalancerListenerRule> allLoadBalancerListenerRules = firewallRuleInterface.getAllNeutronLoadBalancerListenerRules();
+        List<NeutronLoadBalancerListener> ans = new ArrayList<NeutronLoadBalancerListener>();
+        //        List<NeutronLoadBalancerListenerRule> rules = new ArrayList<NeutronLoadBalancerListenerRule>();
+        Iterator<NeutronLoadBalancerListener> i = allLoadBalancerListeners.iterator();
+        while (i.hasNext()) {
+            NeutronLoadBalancerListener nsg = i.next();
+            if ((queryLoadBalancerListenerID == null ||
+                    queryLoadBalancerListenerID.equals(nsg.getLoadBalancerListenerID())) &&
+                    (queryLoadBalancerListenerDefaultPoolID == null ||
+                            queryLoadBalancerListenerDefaultPoolID.equals(nsg.getNeutronLoadBalancerListenerDefaultPoolID())) &&
+                    (queryLoadBalancerListenerTenantID == null ||
+                            queryLoadBalancerListenerTenantID.equals(nsg.getLoadBalancerListenerTenantID())) &&
+                    (queryLoadBalancerListenerName == null ||
+                            queryLoadBalancerListenerName.equals(nsg.getLoadBalancerListenerName())) &&
+                    (queryLoadBalancerListenerDescription == null ||
+                            queryLoadBalancerListenerDescription.equals(nsg.getLoadBalancerListenerDescription())) &&
+                    (queryLoadBalancerListenerIsShared == null ||
+                            queryLoadBalancerListenerIsShared.equals(nsg.getLoadBalancerListenerIsShared())) &&
+                    (queryLoadBalancerListenerProtocol == null ||
+                            queryLoadBalancerListenerProtocol.equals(nsg.getNeutronLoadBalancerListenerProtocol())) &&
+                    (queryLoadBalancerListenerProtocolPort == null ||
+                            queryLoadBalancerListenerProtocolPort.equals(nsg.getNeutronLoadBalancerListenerProtocolPort())) &&
+                    (queryLoadBalancerListenerLoadBalancerID == null ||
+                            queryLoadBalancerListenerLoadBalancerID.equals(nsg.getNeutronLoadBalancerListenerLoadBalancerID())) &&
+                    (queryLoadBalancerListenerAdminIsUp == null ||
+                            queryLoadBalancerListenerAdminIsUp.equals(nsg.getLoadBalancerListenerAdminStateIsUp())) &&
+                    (queryLoadBalancerListenerStatus == null ||
+                            queryLoadBalancerListenerStatus.equals(nsg.getLoadBalancerListenerStatus()))) {
+                if (fields.size() > 0) {
+                    ans.add(extractFields(nsg,fields));
+                } else {
+                    ans.add(nsg);
+                }
+            }
+        }
+        return Response.status(200).entity(
+                new NeutronLoadBalancerListenerRequest(ans)).build();
+    }
+
+    /**
+     * Returns a specific LoadBalancerListener */
+
+    @Path("{loadBalancerListenerID}")
+    @GET
+    @Produces({ MediaType.APPLICATION_JSON })
+    @StatusCodes({
+            @ResponseCode(code = 200, condition = "Operation successful"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response showLoadBalancerListener(@PathParam("loadBalancerListenerID") String loadBalancerListenerID,
+            // return fields
+            @QueryParam("fields") List<String> fields) {
+        INeutronLoadBalancerListenerCRUD loadBalancerListenerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerListenerCRUD(this);
+        if (loadBalancerListenerInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerListener CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+        if (!loadBalancerListenerInterface.neutronLoadBalancerListenerExists(loadBalancerListenerID)) {
+            throw new ResourceNotFoundException("LoadBalancerListener UUID does not exist.");
+        }
+        if (fields.size() > 0) {
+            NeutronLoadBalancerListener ans = loadBalancerListenerInterface.getNeutronLoadBalancerListener(loadBalancerListenerID);
+            return Response.status(200).entity(
+                    new NeutronLoadBalancerListenerRequest(extractFields(ans, fields))).build();
+        } else {
+            return Response.status(200).entity(new NeutronLoadBalancerListenerRequest(loadBalancerListenerInterface.getNeutronLoadBalancerListener(loadBalancerListenerID))).build();
+        }
+    }
+
+    /**
+     * Creates new LoadBalancerListener */
+
+    @POST
+    @Produces({ MediaType.APPLICATION_JSON })
+    @Consumes({ MediaType.APPLICATION_JSON })
+    @StatusCodes({
+            @ResponseCode(code = 201, condition = "Created"),
+            @ResponseCode(code = 400, condition = "Bad Request"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 403, condition = "Forbidden"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 409, condition = "Conflict"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response createLoadBalancerListeners(final NeutronLoadBalancerListenerRequest input) {
+        INeutronLoadBalancerListenerCRUD loadBalancerListenerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerListenerCRUD(this);
+        if (loadBalancerListenerInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerListener CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+        if (input.isSingleton()) {
+            NeutronLoadBalancerListener singleton = input.getSingleton();
+
+            /*
+             *  Verify that the LoadBalancerListener doesn't already exist.
+             */
+            if (loadBalancerListenerInterface.neutronLoadBalancerListenerExists(singleton.getLoadBalancerListenerID())) {
+                throw new BadRequestException("LoadBalancerListener UUID already exists");
+            }
+
+            Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerListenerAware.class, this, null);
+            if (instances != null) {
+                for (Object instance : instances) {
+                    INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+                    int status = service.canCreateNeutronLoadBalancerListener(singleton);
+                    if (status < 200 || status > 299) {
+                        return Response.status(status).build();
+                    }
+                }
+            }
+            loadBalancerListenerInterface.addNeutronLoadBalancerListener(singleton);
+            if (instances != null) {
+                for (Object instance : instances) {
+                    INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+                    service.neutronLoadBalancerListenerCreated(singleton);
+                }
+            }
+        } else {
+            List<NeutronLoadBalancerListener> bulk = input.getBulk();
+            Iterator<NeutronLoadBalancerListener> i = bulk.iterator();
+            HashMap<String, NeutronLoadBalancerListener> testMap = new HashMap<String, NeutronLoadBalancerListener>();
+            Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerListenerAware.class, this, null);
+            while (i.hasNext()) {
+                NeutronLoadBalancerListener test = i.next();
+
+                /*
+                 *  Verify that the LoadBalancerListener doesn't already exist
+                 */
+
+                if (loadBalancerListenerInterface.neutronLoadBalancerListenerExists(test.getLoadBalancerListenerID())) {
+                    throw new BadRequestException("LoadBalancerListener UUID already is already created");
+                }
+                if (testMap.containsKey(test.getLoadBalancerListenerID())) {
+                    throw new BadRequestException("LoadBalancerListener UUID already exists");
+                }
+                testMap.put(test.getLoadBalancerListenerID(), test);
+                if (instances != null) {
+                    for (Object instance : instances) {
+                        INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+                        int status = service.canCreateNeutronLoadBalancerListener(test);
+                        if (status < 200 || status > 299) {
+                            return Response.status(status).build();
+                        }
+                    }
+                }
+            }
+            /*
+             * now, each element of the bulk request can be added to the cache
+             */
+            i = bulk.iterator();
+            while (i.hasNext()) {
+                NeutronLoadBalancerListener test = i.next();
+                loadBalancerListenerInterface.addNeutronLoadBalancerListener(test);
+                if (instances != null) {
+                    for (Object instance : instances) {
+                        INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+                        service.neutronLoadBalancerListenerCreated(test);
+                    }
+                }
+            }
+        }
+        return Response.status(201).entity(input).build();
+    }
+
+    /**
+     * Updates a LoadBalancerListener
+     */
+    @Path("{loadBalancerListenerID}")
+    @PUT
+    @Produces({ MediaType.APPLICATION_JSON })
+    @Consumes({ MediaType.APPLICATION_JSON })
+    @StatusCodes({
+            @ResponseCode(code = 200, condition = "Operation successful"),
+            @ResponseCode(code = 400, condition = "Bad Request"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 403, condition = "Forbidden"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response updateLoadBalancerListener(
+            @PathParam("loadBalancerListenerID") String loadBalancerListenerID, final NeutronLoadBalancerListenerRequest input) {
+        INeutronLoadBalancerListenerCRUD loadBalancerListenerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerListenerCRUD(this);
+        if (loadBalancerListenerInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerListener CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+
+        /*
+         * verify the LoadBalancerListener exists and there is only one delta provided
+         */
+        if (!loadBalancerListenerInterface.neutronLoadBalancerListenerExists(loadBalancerListenerID)) {
+            throw new ResourceNotFoundException("LoadBalancerListener UUID does not exist.");
+        }
+        if (!input.isSingleton()) {
+            throw new BadRequestException("Only singleton edit supported");
+        }
+        NeutronLoadBalancerListener delta = input.getSingleton();
+        NeutronLoadBalancerListener original = loadBalancerListenerInterface.getNeutronLoadBalancerListener(loadBalancerListenerID);
+
+        /*
+         * updates restricted by Neutron
+         */
+        if (delta.getLoadBalancerListenerID() != null ||
+                delta.getNeutronLoadBalancerListenerDefaultPoolID() != null ||
+                delta.getLoadBalancerListenerTenantID() != null ||
+                delta.getLoadBalancerListenerName() != null ||
+                delta.getLoadBalancerListenerDescription() != null ||
+                delta.getLoadBalancerListenerIsShared() != null ||
+                delta.getNeutronLoadBalancerListenerProtocol() != null ||
+                delta.getNeutronLoadBalancerListenerProtocolPort() != null ||
+                delta.getNeutronLoadBalancerListenerLoadBalancerID() != null ||
+                delta.getLoadBalancerListenerAdminStateIsUp() != null ||
+                delta.getLoadBalancerListenerStatus() != null) {
+            throw new BadRequestException("Attribute edit blocked by Neutron");
+        }
+
+        Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerListenerAware.class, this, null);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+                int status = service.canUpdateNeutronLoadBalancerListener(delta, original);
+                if (status < 200 || status > 299) {
+                    return Response.status(status).build();
+                }
+            }
+        }
+
+        /*
+         * update the object and return it
+         */
+        loadBalancerListenerInterface.updateNeutronLoadBalancerListener(loadBalancerListenerID, delta);
+        NeutronLoadBalancerListener updatedLoadBalancerListener = loadBalancerListenerInterface.getNeutronLoadBalancerListener(loadBalancerListenerID);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+                service.neutronLoadBalancerListenerUpdated(updatedLoadBalancerListener);
+            }
+        }
+        return Response.status(200).entity(new NeutronLoadBalancerListenerRequest(loadBalancerListenerInterface.getNeutronLoadBalancerListener(loadBalancerListenerID))).build();
+    }
+
+    /**
+     * Deletes a LoadBalancerListener */
+
+    @Path("{loadBalancerListenerID}")
+    @DELETE
+    @StatusCodes({
+            @ResponseCode(code = 204, condition = "No Content"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 409, condition = "Conflict"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response deleteLoadBalancerListener(
+            @PathParam("loadBalancerListenerID") String loadBalancerListenerID) {
+        INeutronLoadBalancerListenerCRUD loadBalancerListenerInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerListenerCRUD(this);
+        if (loadBalancerListenerInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerListener CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+
+        /*
+         * verify the LoadBalancerListener exists and it isn't currently in use
+         */
+        if (!loadBalancerListenerInterface.neutronLoadBalancerListenerExists(loadBalancerListenerID)) {
+            throw new ResourceNotFoundException("LoadBalancerListener UUID does not exist.");
+        }
+        if (loadBalancerListenerInterface.neutronLoadBalancerListenerInUse(loadBalancerListenerID)) {
+            return Response.status(409).build();
+        }
+        NeutronLoadBalancerListener singleton = loadBalancerListenerInterface.getNeutronLoadBalancerListener(loadBalancerListenerID);
+        Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerListenerAware.class, this, null);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+                int status = service.canDeleteNeutronLoadBalancerListener(singleton);
+                if (status < 200 || status > 299) {
+                    return Response.status(status).build();
+                }
+            }
+        }
+
+        loadBalancerListenerInterface.removeNeutronLoadBalancerListener(loadBalancerListenerID);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerListenerAware service = (INeutronLoadBalancerListenerAware) instance;
+                service.neutronLoadBalancerListenerDeleted(singleton);
+            }
+        }
+        return Response.status(204).build();
+    }
+}
diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerListenerRequest.java b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerListenerRequest.java
new file mode 100644 (file)
index 0000000..ba375af
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerListener;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.List;
+
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerListenerRequest {
+    /**
+     * See OpenStack Network API v2.0 Reference for a description of these attributes:
+     * http://docs.openstack.org/api/openstack-network/2.0/content/
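+     *
+     * Illustrative payload shapes (attribute names inside the objects are assumed
+     * from the Neutron LBaaS API, not defined by this class): a singleton request
+     * binds {"listener": {...}} to singletonLoadBalancerListener and a bulk request
+     * binds {"listeners": [{...}, ...]} to bulkRequest.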
+     */
+
+    @XmlElement(name="listener")
+    NeutronLoadBalancerListener singletonLoadBalancerListener;
+
+    @XmlElement(name="listeners")
+    List<NeutronLoadBalancerListener> bulkRequest;
+
+    NeutronLoadBalancerListenerRequest() {
+    }
+
+    NeutronLoadBalancerListenerRequest(List<NeutronLoadBalancerListener> bulk) {
+        bulkRequest = bulk;
+        singletonLoadBalancerListener = null;
+    }
+
+    NeutronLoadBalancerListenerRequest(NeutronLoadBalancerListener group) {
+        singletonLoadBalancerListener = group;
+    }
+
+    public List<NeutronLoadBalancerListener> getBulk() {
+        return bulkRequest;
+    }
+
+    public NeutronLoadBalancerListener getSingleton() {
+        return singletonLoadBalancerListener;
+    }
+
+    public boolean isSingleton() {
+        return (singletonLoadBalancerListener != null);
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerNorthbound.java b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerNorthbound.java
new file mode 100644 (file)
index 0000000..748dffc
--- /dev/null
@@ -0,0 +1,384 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+
+import org.codehaus.enunciate.jaxrs.ResponseCode;
+import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerAware;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancer;
+import org.opendaylight.controller.northbound.commons.RestMessages;
+import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
+import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
+import org.opendaylight.controller.sal.utils.ServiceHelper;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.DELETE;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Neutron Northbound REST APIs for LoadBalancers.<br>
+ * This class provides REST APIs for managing neutron LoadBalancers
+ *
+ * <br>
+ * <br>
+ * Authentication scheme : <b>HTTP Basic</b><br>
+ * Authentication realm : <b>opendaylight</b><br>
+ * Transport : <b>HTTP and HTTPS</b><br>
+ * <br>
+ * HTTPS Authentication is disabled by default. Administrator can enable it in
+ * tomcat-server.xml after adding a proper keystore / SSL certificate from a
+ * trusted authority.<br>
+ * More info :
+ * http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
+ *
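+ * <br>
+ * Example request (illustrative only; the URL prefix under which this resource is
+ * mounted and the attribute values shown are assumed, not defined by this class):<br>
+ * GET /loadbalancers?name=lb1 returns the matching load balancers wrapped in a<br>
+ * NeutronLoadBalancerRequest and serialized as JSON.<br>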
+ */
+@Path("/loadbalancers")
+public class NeutronLoadBalancerNorthbound {
+
+    private NeutronLoadBalancer extractFields(NeutronLoadBalancer o, List<String> fields) {
+        return o.extractFields(fields);
+    }
+
+    /**
+     * Returns a list of all LoadBalancers */
+    @GET
+    @Produces({ MediaType.APPLICATION_JSON })
+    @StatusCodes({
+            @ResponseCode(code = 200, condition = "Operation successful"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+
+    public Response listGroups(
+            // return fields
+            @QueryParam("fields") List<String> fields,
+            // OpenStack LoadBalancer attributes
+            @QueryParam("id") String queryLoadBalancerID,
+            @QueryParam("tenant_id") String queryLoadBalancerTenantID,
+            @QueryParam("name") String queryLoadBalancerName,
+            @QueryParam("description") String queryLoadBalancerDescription,
+            @QueryParam("status") String queryLoadBalancerStatus,
+            @QueryParam("vip_address") String queryLoadBalancerVipAddress,
+            @QueryParam("vip_subnet") String queryLoadBalancerVipSubnet,
+            // pagination
+            @QueryParam("limit") String limit,
+            @QueryParam("marker") String marker,
+            @QueryParam("page_reverse") String pageReverse
+            // sorting not supported
+    ) {
+        INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+                this);
+        //        INeutronLoadBalancerRuleCRUD firewallRuleInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerRuleCRUD(this);
+
+        if (loadBalancerPoolInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+        List<NeutronLoadBalancer> allLoadBalancers = loadBalancerPoolInterface.getAllNeutronLoadBalancers();
+        //        List<NeutronLoadBalancerRule> allLoadBalancerRules = firewallRuleInterface.getAllNeutronLoadBalancerRules();
+        List<NeutronLoadBalancer> ans = new ArrayList<NeutronLoadBalancer>();
+        //        List<NeutronLoadBalancerRule> rules = new ArrayList<NeutronLoadBalancerRule>();
+        Iterator<NeutronLoadBalancer> i = allLoadBalancers.iterator();
+        while (i.hasNext()) {
+            NeutronLoadBalancer nsg = i.next();
+            if ((queryLoadBalancerID == null ||
+                    queryLoadBalancerID.equals(nsg.getLoadBalancerID())) &&
+                    (queryLoadBalancerTenantID == null ||
+                            queryLoadBalancerTenantID.equals(nsg.getLoadBalancerTenantID())) &&
+                    (queryLoadBalancerName == null ||
+                            queryLoadBalancerName.equals(nsg.getLoadBalancerName())) &&
+                    (queryLoadBalancerDescription == null ||
+                            queryLoadBalancerDescription.equals(nsg.getLoadBalancerDescription())) &&
+                    (queryLoadBalancerVipAddress == null ||
+                            queryLoadBalancerVipAddress.equals(nsg.getLoadBalancerVipAddress())) &&
+                    (queryLoadBalancerVipSubnet == null ||
+                            queryLoadBalancerVipSubnet.equals(nsg.getLoadBalancerVipSubnetID()))) {
+                if (fields.size() > 0) {
+                    ans.add(extractFields(nsg,fields));
+                } else {
+                    ans.add(nsg);
+                }
+            }
+        }
+        return Response.status(200).entity(
+                new NeutronLoadBalancerRequest(ans)).build();
+    }
+
+    /**
+     * Returns a specific LoadBalancer */
+
+    @Path("{loadBalancerPoolID}")
+    @GET
+    @Produces({ MediaType.APPLICATION_JSON })
+
+    @StatusCodes({
+            @ResponseCode(code = 200, condition = "Operation successful"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response showLoadBalancer(@PathParam("loadBalancerPoolID") String loadBalancerPoolID,
+            // return fields
+            @QueryParam("fields") List<String> fields) {
+        INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+                this);
+        if (loadBalancerPoolInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+        if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+            throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
+        }
+        if (fields.size() > 0) {
+            NeutronLoadBalancer ans = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+            return Response.status(200).entity(
+                    new NeutronLoadBalancerRequest(extractFields(ans, fields))).build();
+        } else {
+            return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer(
+                    loadBalancerPoolID))).build();
+        }
+    }
+
+    /**
+     * Creates new LoadBalancer */
+
+    @POST
+    @Produces({ MediaType.APPLICATION_JSON })
+    @Consumes({ MediaType.APPLICATION_JSON })
+
+    @StatusCodes({
+            @ResponseCode(code = 201, condition = "Created"),
+            @ResponseCode(code = 400, condition = "Bad Request"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 403, condition = "Forbidden"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 409, condition = "Conflict"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response createLoadBalancers(final NeutronLoadBalancerRequest input) {
+        INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+                this);
+        if (loadBalancerPoolInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+        if (input.isSingleton()) {
+            NeutronLoadBalancer singleton = input.getSingleton();
+
+            /*
+             *  Verify that the LoadBalancer doesn't already exist.
+             */
+            if (loadBalancerPoolInterface.neutronLoadBalancerExists(singleton.getLoadBalancerID())) {
+                throw new BadRequestException("LoadBalancer UUID already exists");
+            }
+
+            Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
+            if (instances != null) {
+                for (Object instance : instances) {
+                    INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+                    int status = service.canCreateNeutronLoadBalancer(singleton);
+                    if (status < 200 || status > 299) {
+                        return Response.status(status).build();
+                    }
+                }
+            }
+            loadBalancerPoolInterface.addNeutronLoadBalancer(singleton);
+            if (instances != null) {
+                for (Object instance : instances) {
+                    INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+                    service.neutronLoadBalancerCreated(singleton);
+                }
+            }
+        } else {
+            List<NeutronLoadBalancer> bulk = input.getBulk();
+            Iterator<NeutronLoadBalancer> i = bulk.iterator();
+            HashMap<String, NeutronLoadBalancer> testMap = new HashMap<String, NeutronLoadBalancer>();
+            Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
+            while (i.hasNext()) {
+                NeutronLoadBalancer test = i.next();
+
+                /*
+                 *  Verify that the LoadBalancer doesn't already exist
+                 */
+
+                if (loadBalancerPoolInterface.neutronLoadBalancerExists(test.getLoadBalancerID())) {
+                    throw new BadRequestException("Load Balancer Pool UUID already is already created");
+                }
+                if (testMap.containsKey(test.getLoadBalancerID())) {
+                    throw new BadRequestException("LoadBalancer UUID already exists");
+                }
+                testMap.put(test.getLoadBalancerID(), test);
+                if (instances != null) {
+                    for (Object instance : instances) {
+                        INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+                        int status = service.canCreateNeutronLoadBalancer(test);
+                        if (status < 200 || status > 299) {
+                            return Response.status(status).build();
+                        }
+                    }
+                }
+            }
+            /*
+             * now, each element of the bulk request can be added to the cache
+             */
+            i = bulk.iterator();
+            while (i.hasNext()) {
+                NeutronLoadBalancer test = i.next();
+                loadBalancerPoolInterface.addNeutronLoadBalancer(test);
+                if (instances != null) {
+                    for (Object instance : instances) {
+                        INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+                        service.neutronLoadBalancerCreated(test);
+                    }
+                }
+            }
+        }
+        return Response.status(201).entity(input).build();
+    }
+
+    /**
+     * Updates a LoadBalancer
+     */
+    @Path("{loadBalancerPoolID}")
+    @PUT
+    @Produces({ MediaType.APPLICATION_JSON })
+    @Consumes({ MediaType.APPLICATION_JSON })
+
+    @StatusCodes({
+            @ResponseCode(code = 200, condition = "Operation successful"),
+            @ResponseCode(code = 400, condition = "Bad Request"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 403, condition = "Forbidden"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response updateLoadBalancer(
+            @PathParam("loadBalancerPoolID") String loadBalancerPoolID, final NeutronLoadBalancerRequest input) {
+        INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+                this);
+        if (loadBalancerPoolInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+
+        /*
+         * verify the LoadBalancer exists and there is only one delta provided
+         */
+        if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+            throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
+        }
+        if (!input.isSingleton()) {
+            throw new BadRequestException("Only singleton edit supported");
+        }
+        NeutronLoadBalancer delta = input.getSingleton();
+        NeutronLoadBalancer original = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+
+        /*
+         * updates restricted by Neutron
+         */
+        if (delta.getLoadBalancerID() != null ||
+                delta.getLoadBalancerTenantID() != null ||
+                delta.getLoadBalancerName() != null ||
+                delta.getLoadBalancerDescription() != null ||
+                delta.getLoadBalancerStatus() != null ||
+                delta.getLoadBalancerVipAddress() != null ||
+                delta.getLoadBalancerVipSubnetID() != null) {
+            throw new BadRequestException("Attribute edit blocked by Neutron");
+        }
+
+        Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+                int status = service.canUpdateNeutronLoadBalancer(delta, original);
+                if (status < 200 || status > 299) {
+                    return Response.status(status).build();
+                }
+            }
+        }
+
+        /*
+         * update the object and return it
+         */
+        loadBalancerPoolInterface.updateNeutronLoadBalancer(loadBalancerPoolID, delta);
+        NeutronLoadBalancer updatedLoadBalancer = loadBalancerPoolInterface.getNeutronLoadBalancer(
+                loadBalancerPoolID);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+                service.neutronLoadBalancerUpdated(updatedLoadBalancer);
+            }
+        }
+        return Response.status(200).entity(new NeutronLoadBalancerRequest(loadBalancerPoolInterface.getNeutronLoadBalancer(
+                loadBalancerPoolID))).build();
+    }
+
+    /**
+     * Deletes a LoadBalancer
+     */
+
+    @Path("{loadBalancerPoolID}")
+    @DELETE
+    @StatusCodes({
+            @ResponseCode(code = 204, condition = "No Content"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 409, condition = "Conflict"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response deleteLoadBalancer(
+            @PathParam("loadBalancerPoolID") String loadBalancerPoolID) {
+        INeutronLoadBalancerCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerCRUD(
+                this);
+        if (loadBalancerPoolInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancer CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+
+        /*
+         * verify the LoadBalancer exists and it isn't currently in use
+         */
+        if (!loadBalancerPoolInterface.neutronLoadBalancerExists(loadBalancerPoolID)) {
+            throw new ResourceNotFoundException("LoadBalancer UUID does not exist.");
+        }
+        if (loadBalancerPoolInterface.neutronLoadBalancerInUse(loadBalancerPoolID)) {
+            return Response.status(409).build();
+        }
+        NeutronLoadBalancer singleton = loadBalancerPoolInterface.getNeutronLoadBalancer(loadBalancerPoolID);
+        Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerAware.class, this, null);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+                int status = service.canDeleteNeutronLoadBalancer(singleton);
+                if (status < 200 || status > 299) {
+                    return Response.status(status).build();
+                }
+            }
+        }
+
+        loadBalancerPoolInterface.removeNeutronLoadBalancer(loadBalancerPoolID);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerAware service = (INeutronLoadBalancerAware) instance;
+                service.neutronLoadBalancerDeleted(singleton);
+            }
+        }
+        return Response.status(204).build();
+    }
+}
diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolMembersNorthbound.java b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolMembersNorthbound.java
new file mode 100644 (file)
index 0000000..ff56fa0
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+import org.codehaus.enunciate.jaxrs.ResponseCode;
+import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberAware;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolMemberCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPoolMember;
+import org.opendaylight.controller.northbound.commons.RestMessages;
+import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
+import org.opendaylight.controller.sal.utils.ServiceHelper;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+
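+/**
+ * Neutron Northbound REST APIs for LoadBalancer Pool Members.<br>
+ * This class provides REST APIs for managing neutron LoadBalancer Pool members.
+ *
+ * <p>A sketch of how the resource is addressed, for illustration only; the base
+ * URI below is deployment-dependent and is not defined by this class:
+ * <pre>
+ * GET &lt;base-uri&gt;/pools/{loadBalancerPoolID}/members?weight=1&amp;fields=id
+ * </pre>
+ */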
+@Path("/pools/{loadBalancerPoolID}/members")
+public class NeutronLoadBalancerPoolMembersNorthbound {
+
+    private NeutronLoadBalancerPoolMember extractFields(NeutronLoadBalancerPoolMember o, List<String> fields) {
+        return o.extractFields(fields);
+    }
+/**
+ * Returns a list of all LoadBalancerPoolMembers
+ */
+@GET
+@Produces({MediaType.APPLICATION_JSON})
+@StatusCodes({
+        @ResponseCode(code = 200, condition = "Operation successful"),
+        @ResponseCode(code = 401, condition = "Unauthorized"),
+        @ResponseCode(code = 501, condition = "Not Implemented")})
+
+public Response listMembers(
+        // return fields
+        @QueryParam("fields") List<String> fields,
+        // OpenStack LoadBalancerPool attributes
+        @QueryParam("id") String queryLoadBalancerPoolMemberID,
+        @QueryParam("tenant_id") String queryLoadBalancerPoolMemberTenantID,
+        @QueryParam("address") String queryLoadBalancerPoolMemberAddress,
+        @QueryParam("protocol_port") String queryLoadBalancerPoolMemberProtoPort,
+        @QueryParam("admin_state_up") String queryLoadBalancerPoolMemberAdminStateUp,
+        @QueryParam("weight") String queryLoadBalancerPoolMemberWeight,
+        @QueryParam("subnet_id") String queryLoadBalancerPoolMemberSubnetID,
+        @QueryParam("status") String queryLoadBalancerPoolMemberStatus,
+
+        // pagination
+        @QueryParam("limit") String limit,
+        @QueryParam("marker") String marker,
+        @QueryParam("page_reverse") String pageReverse
+        // sorting not supported
+) {
+    INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces
+            .getINeutronLoadBalancerPoolMemberCRUD(this);
+    if (loadBalancerPoolMemberInterface == null) {
+        throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+                + RestMessages.SERVICEUNAVAILABLE.toString());
+    }
+    List<NeutronLoadBalancerPoolMember> allLoadBalancerPoolMembers = loadBalancerPoolMemberInterface
+            .getAllNeutronLoadBalancerPoolMembers();
+    List<NeutronLoadBalancerPoolMember> ans = new ArrayList<NeutronLoadBalancerPoolMember>();
+    Iterator<NeutronLoadBalancerPoolMember> i = allLoadBalancerPoolMembers.iterator();
+    while (i.hasNext()) {
+        NeutronLoadBalancerPoolMember nsg = i.next();
+        if ((queryLoadBalancerPoolMemberID == null ||
+                queryLoadBalancerPoolMemberID.equals(nsg.getPoolMemberID())) &&
+                (queryLoadBalancerPoolMemberTenantID == null ||
+                        queryLoadBalancerPoolMemberTenantID.equals(nsg.getPoolMemberTenantID())) &&
+                (queryLoadBalancerPoolMemberAddress == null ||
+                        queryLoadBalancerPoolMemberAddress.equals(nsg.getPoolMemberAddress())) &&
+                (queryLoadBalancerPoolMemberAdminStateUp == null ||
+                        queryLoadBalancerPoolMemberAdminStateUp.equals(nsg.getPoolMemberAdminStateIsUp())) &&
+                (queryLoadBalancerPoolMemberWeight == null ||
+                        queryLoadBalancerPoolMemberWeight.equals(nsg.getPoolMemberWeight())) &&
+                (queryLoadBalancerPoolMemberSubnetID == null ||
+                        queryLoadBalancerPoolMemberSubnetID.equals(nsg.getPoolMemberSubnetID())) &&
+                (queryLoadBalancerPoolMemberStatus == null ||
+                        queryLoadBalancerPoolMemberStatus.equals(nsg.getPoolMemberStatus()))) {
+            if (fields.size() > 0) {
+                ans.add(extractFields(nsg, fields));
+            } else {
+                ans.add(nsg);
+            }
+        }
+    }
+    return Response.status(200).entity(
+            new INeutronLoadBalancerPoolMemberRequest(ans)).build();
+}
+
+/**
+ * Adds a Member to an LBaaS Pool
+ */
+@Path("/pools/{loadBalancerPoolID}/members")
+@PUT
+@Produces({MediaType.APPLICATION_JSON})
+@Consumes({MediaType.APPLICATION_JSON})
+@StatusCodes({
+        @ResponseCode(code = 200, condition = "Operation successful"),
+        @ResponseCode(code = 401, condition = "Unauthorized"),
+        @ResponseCode(code = 404, condition = "Not Found"),
+        @ResponseCode(code = 501, condition = "Not Implemented")})
+public Response createLoadBalancerPoolMember(INeutronLoadBalancerPoolMemberRequest input) {
+
+    INeutronLoadBalancerPoolMemberCRUD loadBalancerPoolMemberInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolMemberCRUD(
+            this);
+    if (loadBalancerPoolMemberInterface == null) {
+        throw new ServiceUnavailableException("LoadBalancerPoolMember CRUD Interface "
+                + RestMessages.SERVICEUNAVAILABLE.toString());
+    }
+    if (input.isSingleton()) {
+        NeutronLoadBalancerPoolMember singleton = input.getSingleton();
+
+        /*
+         *  Verify that the LoadBalancerPoolMember doesn't already exist.
+         */
+        if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists(
+                singleton.getPoolMemberID())) {
+            throw new BadRequestException("LoadBalancerPoolMember UUID already exists");
+        }
+
+        Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+                int status = service.canCreateNeutronLoadBalancerPoolMember(singleton);
+                if (status < 200 || status > 299) {
+                    return Response.status(status).build();
+                }
+            }
+        }
+        loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(singleton);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+                service.neutronLoadBalancerPoolMemberCreated(singleton);
+            }
+        }
+    } else {
+        List<NeutronLoadBalancerPoolMember> bulk = input.getBulk();
+        Iterator<NeutronLoadBalancerPoolMember> i = bulk.iterator();
+        HashMap<String, NeutronLoadBalancerPoolMember> testMap = new HashMap<String, NeutronLoadBalancerPoolMember>();
+        Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolMemberAware.class, this, null);
+        while (i.hasNext()) {
+            NeutronLoadBalancerPoolMember test = i.next();
+
+            /*
+             *  Verify that the pool member doesn't already exist
+             */
+
+            if (loadBalancerPoolMemberInterface.neutronLoadBalancerPoolMemberExists(
+                    test.getPoolMemberID())) {
+                throw new BadRequestException("Load Balancer PoolMember UUID already is already created");
+            }
+            if (testMap.containsKey(test.getPoolMemberID())) {
+                throw new BadRequestException("Load Balancer PoolMember UUID already exists");
+            }
+            if (instances != null) {
+                for (Object instance : instances) {
+                    INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+                    int status = service.canCreateNeutronLoadBalancerPoolMember(test);
+                    if (status < 200 || status > 299) {
+                        return Response.status(status).build();
+                    }
+                }
+            }
+            testMap.put(test.getPoolMemberID(), test);
+        }
+        /*
+         * now, each element of the bulk request can be added to the cache
+         */
+        i = bulk.iterator();
+        while (i.hasNext()) {
+            NeutronLoadBalancerPoolMember test = i.next();
+            loadBalancerPoolMemberInterface.addNeutronLoadBalancerPoolMember(test);
+            if (instances != null) {
+                for (Object instance : instances) {
+                    INeutronLoadBalancerPoolMemberAware service = (INeutronLoadBalancerPoolMemberAware) instance;
+                    service.neutronLoadBalancerPoolMemberCreated(test);
+                }
+            }
+        }
+    }
+    return Response.status(201).entity(input).build();
+}
+}
diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolNorthbound.java b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolNorthbound.java
new file mode 100644 (file)
index 0000000..fc5357c
--- /dev/null
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+
+import org.codehaus.enunciate.jaxrs.ResponseCode;
+import org.codehaus.enunciate.jaxrs.StatusCodes;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolAware;
+import org.opendaylight.controller.networkconfig.neutron.INeutronLoadBalancerPoolCRUD;
+import org.opendaylight.controller.networkconfig.neutron.NeutronCRUDInterfaces;
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
+import org.opendaylight.controller.northbound.commons.RestMessages;
+import org.opendaylight.controller.northbound.commons.exception.BadRequestException;
+import org.opendaylight.controller.northbound.commons.exception.ResourceNotFoundException;
+import org.opendaylight.controller.northbound.commons.exception.ServiceUnavailableException;
+import org.opendaylight.controller.sal.utils.ServiceHelper;
+
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.PUT;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+
+/**
+ * Neutron Northbound REST APIs for LoadBalancerPools.<br>
+ * This class provides REST APIs for managing neutron LoadBalancerPools
+ *
+ * <br>
+ * <br>
+ * Authentication scheme : <b>HTTP Basic</b><br>
+ * Authentication realm : <b>opendaylight</b><br>
+ * Transport : <b>HTTP and HTTPS</b><br>
+ * <br>
+ * HTTPS Authentication is disabled by default. Administrator can enable it in
+ * tomcat-server.xml after adding a proper keystore / SSL certificate from a
+ * trusted authority.<br>
+ * More info :
+ * http://tomcat.apache.org/tomcat-7.0-doc/ssl-howto.html#Configuration
+ *
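+ * A sketch of the exposed operations, for illustration only (the base URI is
+ * deployment-dependent and is not defined here):
+ * <pre>
+ * GET  &lt;base-uri&gt;/pools
+ * GET  &lt;base-uri&gt;/pools/{loadBalancerPoolID}
+ * POST &lt;base-uri&gt;/pools
+ * PUT  &lt;base-uri&gt;/pools/{loadBalancerPoolID}
+ * </pre>
+ *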
+ */
+@Path("/pools")
+public class NeutronLoadBalancerPoolNorthbound {
+
+    private NeutronLoadBalancerPool extractFields(NeutronLoadBalancerPool o, List<String> fields) {
+        return o.extractFields(fields);
+    }
+
+    /**
+     * Returns a list of all LoadBalancerPools
+     */
+    @GET
+    @Produces({ MediaType.APPLICATION_JSON })
+    @StatusCodes({
+            @ResponseCode(code = 200, condition = "Operation successful"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+
+    public Response listGroups(
+            // return fields
+            @QueryParam("fields") List<String> fields,
+            // OpenStack LoadBalancerPool attributes
+            @QueryParam("id") String queryLoadBalancerPoolID,
+            @QueryParam("tenant_id") String queryLoadBalancerPoolTenantID,
+            @QueryParam("name") String queryLoadBalancerPoolName,
+            @QueryParam("description") String queryLoadBalancerDescription,
+            @QueryParam("protocol") String queryLoadBalancerProtocol,
+            @QueryParam("lb_algorithm") String queryLoadBalancerPoolLbAlgorithm,
+            @QueryParam("healthmonitor_id") String queryLoadBalancerPoolHealthMonitorID,
+            @QueryParam("admin_state_up") String queryLoadBalancerIsAdminStateUp,
+            @QueryParam("status") String queryLoadBalancerPoolStatus,
+            @QueryParam("members") List queryLoadBalancerPoolMembers,
+            // pagination
+            @QueryParam("limit") String limit,
+            @QueryParam("marker") String marker,
+            @QueryParam("page_reverse") String pageReverse
+            // sorting not supported
+    ) {
+        INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+        if (loadBalancerPoolInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+        List<NeutronLoadBalancerPool> allLoadBalancerPools = loadBalancerPoolInterface.getAllNeutronLoadBalancerPools();
+        List<NeutronLoadBalancerPool> ans = new ArrayList<NeutronLoadBalancerPool>();
+        Iterator<NeutronLoadBalancerPool> i = allLoadBalancerPools.iterator();
+        while (i.hasNext()) {
+            NeutronLoadBalancerPool nsg = i.next();
+            if ((queryLoadBalancerPoolID == null ||
+                    queryLoadBalancerPoolID.equals(nsg.getLoadBalancerPoolID())) &&
+                    (queryLoadBalancerPoolTenantID == null ||
+                            queryLoadBalancerPoolTenantID.equals(nsg.getLoadBalancerPoolTenantID())) &&
+                    (queryLoadBalancerPoolName == null ||
+                            queryLoadBalancerPoolName.equals(nsg.getLoadBalancerPoolName())) &&
+                    (queryLoadBalancerDescription == null ||
+                            queryLoadBalancerDescription.equals(nsg.getLoadBalancerPoolDescription())) &&
+                    (queryLoadBalancerProtocol == null ||
+                            queryLoadBalancerProtocol.equals(nsg.getLoadBalancerPoolProtocol())) &&
+                    (queryLoadBalancerPoolLbAlgorithm == null ||
+                            queryLoadBalancerPoolLbAlgorithm.equals(nsg.getLoadBalancerPoolLbAlgorithm())) &&
+                    (queryLoadBalancerPoolHealthMonitorID == null ||
+                            queryLoadBalancerPoolHealthMonitorID.equals(nsg.getNeutronLoadBalancerPoolHealthMonitorID())) &&
+                    (queryLoadBalancerIsAdminStateUp == null ||
+                            queryLoadBalancerIsAdminStateUp.equals(nsg.getLoadBalancerPoolAdminIsStateIsUp())) &&
+                    (queryLoadBalancerPoolStatus == null ||
+                            queryLoadBalancerPoolStatus.equals(nsg.getLoadBalancerPoolStatus())) &&
+                    (queryLoadBalancerPoolMembers.size() == 0 ||
+                            queryLoadBalancerPoolMembers.equals(nsg.getLoadBalancerPoolMembers()))) {
+                if (fields.size() > 0) {
+                    ans.add(extractFields(nsg,fields));
+                } else {
+                    ans.add(nsg);
+                }
+            }
+        }
+        return Response.status(200).entity(
+                new NeutronLoadBalancerPoolRequest(ans)).build();
+    }
+
+    /**
+     * Returns a specific LoadBalancerPool
+     */
+
+    @Path("{loadBalancerPoolID}")
+    @GET
+    @Produces({ MediaType.APPLICATION_JSON })
+    @StatusCodes({
+            @ResponseCode(code = 200, condition = "Operation successful"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response showLoadBalancerPool(@PathParam("loadBalancerPoolID") String loadBalancerPoolID,
+            // return fields
+            @QueryParam("fields") List<String> fields) {
+        INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+        if (loadBalancerPoolInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+        if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolID)) {
+            throw new ResourceNotFoundException("LoadBalancerPool UUID does not exist.");
+        }
+        if (fields.size() > 0) {
+            NeutronLoadBalancerPool ans = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID);
+            return Response.status(200).entity(
+                    new NeutronLoadBalancerPoolRequest(extractFields(ans, fields))).build();
+        } else {
+            return Response.status(200).entity(new NeutronLoadBalancerPoolRequest(loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID))).build();
+        }
+    }
+
+    /**
+     * Creates new LoadBalancerPools
+     */
+
+    @POST
+    @Produces({ MediaType.APPLICATION_JSON })
+    @Consumes({ MediaType.APPLICATION_JSON })
+    @StatusCodes({
+            @ResponseCode(code = 201, condition = "Created"),
+            @ResponseCode(code = 400, condition = "Bad Request"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 403, condition = "Forbidden"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 409, condition = "Conflict"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response createLoadBalancerPools(final NeutronLoadBalancerPoolRequest input) {
+        INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+        if (loadBalancerPoolInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+        if (input.isSingleton()) {
+            NeutronLoadBalancerPool singleton = input.getSingleton();
+
+            /*
+             *  Verify that the LoadBalancerPool doesn't already exist.
+             */
+            if (loadBalancerPoolInterface.neutronLoadBalancerPoolExists(singleton.getLoadBalancerPoolID())) {
+                throw new BadRequestException("LoadBalancerPool UUID already exists");
+            }
+
+            Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolAware.class, this, null);
+            if (instances != null) {
+                for (Object instance : instances) {
+                    INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+                    int status = service.canCreateNeutronLoadBalancerPool(singleton);
+                    if (status < 200 || status > 299) {
+                        return Response.status(status).build();
+                    }
+                }
+            }
+            loadBalancerPoolInterface.addNeutronLoadBalancerPool(singleton);
+            if (instances != null) {
+                for (Object instance : instances) {
+                    INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+                    service.neutronLoadBalancerPoolCreated(singleton);
+                }
+            }
+        } else {
+            List<NeutronLoadBalancerPool> bulk = input.getBulk();
+            Iterator<NeutronLoadBalancerPool> i = bulk.iterator();
+            HashMap<String, NeutronLoadBalancerPool> testMap = new HashMap<String, NeutronLoadBalancerPool>();
+            Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolAware.class, this, null);
+            while (i.hasNext()) {
+                NeutronLoadBalancerPool test = i.next();
+
+                /*
+                 *  Verify that the load balancer pool doesn't already exist
+                 */
+
+                if (loadBalancerPoolInterface.neutronLoadBalancerPoolExists(test.getLoadBalancerPoolID())) {
+                    throw new BadRequestException("Load Balancer Pool UUID already is already created");
+                }
+                if (testMap.containsKey(test.getLoadBalancerPoolID())) {
+                    throw new BadRequestException("Load Balancer Pool UUID already exists");
+                }
+                if (instances != null) {
+                    for (Object instance : instances) {
+                        INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+                        int status = service.canCreateNeutronLoadBalancerPool(test);
+                        if (status < 200 || status > 299) {
+                            return Response.status(status).build();
+                        }
+                    }
+                }
+                testMap.put(test.getLoadBalancerPoolID(), test);
+            }
+            /*
+             * now, each element of the bulk request can be added to the cache
+             */
+            i = bulk.iterator();
+            while (i.hasNext()) {
+                NeutronLoadBalancerPool test = i.next();
+                loadBalancerPoolInterface.addNeutronLoadBalancerPool(test);
+                if (instances != null) {
+                    for (Object instance : instances) {
+                        INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+                        service.neutronLoadBalancerPoolCreated(test);
+                    }
+                }
+            }
+        }
+        return Response.status(201).entity(input).build();
+    }
+
+    /**
+     * Updates a LoadBalancerPool
+     */
+    @Path("{loadBalancerPoolID}")
+    @PUT
+    @Produces({ MediaType.APPLICATION_JSON })
+    @Consumes({ MediaType.APPLICATION_JSON })
+    @StatusCodes({
+            @ResponseCode(code = 200, condition = "Operation successful"),
+            @ResponseCode(code = 400, condition = "Bad Request"),
+            @ResponseCode(code = 401, condition = "Unauthorized"),
+            @ResponseCode(code = 403, condition = "Forbidden"),
+            @ResponseCode(code = 404, condition = "Not Found"),
+            @ResponseCode(code = 501, condition = "Not Implemented") })
+    public Response updateLoadBalancerPool(
+            @PathParam("loadBalancerPoolID") String loadBalancerPoolID, final NeutronLoadBalancerPoolRequest input) {
+        INeutronLoadBalancerPoolCRUD loadBalancerPoolInterface = NeutronCRUDInterfaces.getINeutronLoadBalancerPoolCRUD(this);
+        if (loadBalancerPoolInterface == null) {
+            throw new ServiceUnavailableException("LoadBalancerPool CRUD Interface "
+                    + RestMessages.SERVICEUNAVAILABLE.toString());
+        }
+
+        /*
+         * verify the LoadBalancerPool exists and there is only one delta provided
+         */
+        if (!loadBalancerPoolInterface.neutronLoadBalancerPoolExists(loadBalancerPoolID)) {
+            throw new ResourceNotFoundException("LoadBalancerPool UUID does not exist.");
+        }
+        if (!input.isSingleton()) {
+            throw new BadRequestException("Only singleton edit supported");
+        }
+        NeutronLoadBalancerPool delta = input.getSingleton();
+        NeutronLoadBalancerPool original = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID);
+
+        /*
+         * updates restricted by Neutron
+         */
+        if (delta.getLoadBalancerPoolID() != null ||
+                delta.getLoadBalancerPoolTenantID() != null ||
+                delta.getLoadBalancerPoolName() != null ||
+                delta.getLoadBalancerPoolDescription() != null ||
+                delta.getLoadBalancerPoolProtocol() != null ||
+                delta.getLoadBalancerPoolLbAlgorithm() != null ||
+                delta.getNeutronLoadBalancerPoolHealthMonitorID() != null ||
+                delta.getLoadBalancerPoolAdminIsStateIsUp() != null ||
+                delta.getLoadBalancerPoolStatus() != null ||
+                delta.getLoadBalancerPoolMembers() != null) {
+            throw new BadRequestException("Attribute edit blocked by Neutron");
+        }
+
+        Object[] instances = ServiceHelper.getGlobalInstances(INeutronLoadBalancerPoolAware.class, this, null);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+                int status = service.canUpdateNeutronLoadBalancerPool(delta, original);
+                if (status < 200 || status > 299) {
+                    return Response.status(status).build();
+                }
+            }
+        }
+
+        /*
+         * update the object and return it
+         */
+        loadBalancerPoolInterface.updateNeutronLoadBalancerPool(loadBalancerPoolID, delta);
+        NeutronLoadBalancerPool updatedLoadBalancerPool = loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID);
+        if (instances != null) {
+            for (Object instance : instances) {
+                INeutronLoadBalancerPoolAware service = (INeutronLoadBalancerPoolAware) instance;
+                service.neutronLoadBalancerPoolUpdated(updatedLoadBalancerPool);
+            }
+        }
+        return Response.status(200).entity(new NeutronLoadBalancerPoolRequest(loadBalancerPoolInterface.getNeutronLoadBalancerPool(loadBalancerPoolID))).build();
+    }
+}
diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolRequest.java b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerPoolRequest.java
new file mode 100644 (file)
index 0000000..a1cdc41
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancerPool;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.List;
+
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerPoolRequest {
+    /**
+     * See the OpenStack Network API v2.0 Reference for a description of this object:
+     * http://docs.openstack.org/api/openstack-network/2.0/content/
+     */
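+
+    /*
+     * Illustrative payload shape only. The "pool"/"pools" element names come from
+     * the @XmlElement annotations below; the attributes shown are assumed from the
+     * query parameters accepted by NeutronLoadBalancerPoolNorthbound and may not be
+     * the exact set serialized by NeutronLoadBalancerPool:
+     *
+     *   { "pool":  { "id": "...", "tenant_id": "...", "name": "...", "lb_algorithm": "ROUND_ROBIN" } }
+     *   { "pools": [ { "id": "..." }, { "id": "..." } ] }
+     */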
+
+    @XmlElement(name="pool")
+    NeutronLoadBalancerPool singletonLoadBalancerPool;
+
+    @XmlElement(name="pools")
+    List<NeutronLoadBalancerPool> bulkRequest;
+
+    NeutronLoadBalancerPoolRequest() {
+    }
+
+    NeutronLoadBalancerPoolRequest(List<NeutronLoadBalancerPool> bulk) {
+        bulkRequest = bulk;
+        singletonLoadBalancerPool = null;
+    }
+
+    NeutronLoadBalancerPoolRequest(NeutronLoadBalancerPool group) {
+        singletonLoadBalancerPool = group;
+    }
+
+    public List<NeutronLoadBalancerPool> getBulk() {
+        return bulkRequest;
+    }
+
+    public NeutronLoadBalancerPool getSingleton() {
+        return singletonLoadBalancerPool;
+    }
+
+    public boolean isSingleton() {
+        return (singletonLoadBalancerPool != null);
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerRequest.java b/opendaylight/northbound/networkconfiguration/neutron/src/main/java/org/opendaylight/controller/networkconfig/neutron/northbound/NeutronLoadBalancerRequest.java
new file mode 100644 (file)
index 0000000..1cf4e70
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 Red Hat, Inc.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.networkconfig.neutron.northbound;
+
+import org.opendaylight.controller.networkconfig.neutron.NeutronLoadBalancer;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.List;
+
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.NONE)
+
+public class NeutronLoadBalancerRequest {
+    /**
+     * See the OpenStack Network API v2.0 Reference for a description of this object:
+     * http://docs.openstack.org/api/openstack-network/2.0/content/
+     */
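+
+    /*
+     * Illustrative payload shape only. The "loadbalancer"/"loadbalancers" element
+     * names come from the @XmlElement annotations below; the attributes shown are
+     * assumptions based on the fields guarded in NeutronLoadBalancerNorthbound and
+     * may not be the exact set serialized by NeutronLoadBalancer:
+     *
+     *   { "loadbalancer":  { "id": "...", "tenant_id": "...", "name": "...", "vip_subnet_id": "..." } }
+     *   { "loadbalancers": [ { "id": "..." }, { "id": "..." } ] }
+     */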
+
+    @XmlElement(name="loadbalancer")
+    NeutronLoadBalancer singletonLoadBalancer;
+
+    @XmlElement(name="loadbalancers")
+    List<NeutronLoadBalancer> bulkRequest;
+
+    NeutronLoadBalancerRequest() {
+    }
+
+    NeutronLoadBalancerRequest(List<NeutronLoadBalancer> bulk) {
+        bulkRequest = bulk;
+        singletonLoadBalancer = null;
+    }
+
+    NeutronLoadBalancerRequest(NeutronLoadBalancer group) {
+        singletonLoadBalancer = group;
+    }
+
+    public List<NeutronLoadBalancer> getBulk() {
+        return bulkRequest;
+    }
+
+    public NeutronLoadBalancer getSingleton() {
+        return singletonLoadBalancer;
+    }
+
+    public boolean isSingleton() {
+        return (singletonLoadBalancer != null);
+    }
+}
\ No newline at end of file
index 9abcca7c53466880de769f93324ad09577142bf8..96d72cb9262657565973651a46a5940355db1064 100644 (file)
@@ -38,6 +38,11 @@ public class NeutronNorthboundRSApplication extends Application {
         classes.add(NeutronFirewallNorthbound.class);
         classes.add(NeutronFirewallPolicyNorthbound.class);
         classes.add(NeutronFirewallRulesNorthbound.class);
+        classes.add(NeutronLoadBalancerNorthbound.class);
+        classes.add(NeutronLoadBalancerListenerNorthbound.class);
+        classes.add(NeutronLoadBalancerPoolNorthbound.class);
+        classes.add(NeutronLoadBalancerHealthMonitorNorthbound.class);
+        classes.add(NeutronLoadBalancerPoolMembersNorthbound.class);
         return classes;
     }
 
index 806e853b3604ec7848e08176aaf7f8d8e8f3996f..0c02adad8a0d4d4ddf499efca66d39b05aff513c 100644 (file)
@@ -527,6 +527,9 @@ public class NeutronRoutersNorthbound {
         if (input.getPortUUID() != null &&
                 input.getSubnetUUID() == null) {
             NeutronRouter_Interface targetInterface = target.getInterfaces().get(input.getPortUUID());
+            if (targetInterface == null) {
+                throw new ResourceNotFoundException("Router interface not found for given Port UUID");
+            }
             input.setSubnetUUID(targetInterface.getSubnetUUID());
             input.setID(target.getID());
             input.setTenantID(target.getTenantID());
@@ -554,7 +557,7 @@ public class NeutronRoutersNorthbound {
                 throw new ResourceNotFoundException("Port UUID not found");
             }
             if (port.getFixedIPs() == null) {
-                throw new ResourceNotFoundException("Port UUID jas no fixed IPs");
+                throw new ResourceNotFoundException("Port UUID has no fixed IPs");
             }
             NeutronSubnet subnet = subnetInterface.getSubnet(input.getSubnetUUID());
             if (subnet == null) {
diff --git a/pom.xml b/pom.xml
index 8bebd2aa61050f0d8058d82f312ac572ec44bf86..e4c51b7839cff2f9af808d3fc78866bd0f599d45 100644 (file)
--- a/pom.xml
+++ b/pom.xml
     <module>opendaylight/commons/parent</module>
     <module>opendaylight/commons/logback_settings</module>
     <module>opendaylight/commons/filter-valve</module>
+    <module>opendaylight/commons/liblldp</module>
 
     <!-- Karaf Distribution -->
     <module>opendaylight/dummy-console</module>