Merge "BUG 2889 : migration of netconf-cli to NormalizedNode api's"
author Tony Tkacik <ttkacik@cisco.com>
Wed, 8 Apr 2015 09:09:47 +0000 (09:09 +0000)
committer Gerrit Code Review <gerrit@opendaylight.org>
Wed, 8 Apr 2015 09:09:47 +0000 (09:09 +0000)
143 files changed:
features/mdsal/src/main/resources/features.xml
opendaylight/archetypes/opendaylight-startup/src/main/resources/archetype-resources/features/pom.xml
opendaylight/md-sal/md-sal-config/src/main/resources/initial/01-md-sal.xml
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/example/ExampleActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ElectionTermImpl.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/NoopProcedure.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActor.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContext.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/RaftActorContextImpl.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLog.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImpl.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/SnapshotManager.java
opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/DelegatingRaftActorBehavior.java [new file with mode: 0644]
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractRaftActorIntegrationTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/AbstractReplicatedLogImplTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/MockRaftActorContext.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/RaftActorTest.java
opendaylight/md-sal/sal-akka-raft/src/test/java/org/opendaylight/controller/cluster/raft/SnapshotManagerTest.java
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataObjectModification.java
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeChangeListener.java
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeChangeService.java
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeIdentifier.java
opendaylight/md-sal/sal-binding-api/src/main/java/org/opendaylight/controller/md/sal/binding/api/DataTreeModification.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataBrokerAdapter.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeListenerAdapter.java [new file with mode: 0644]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeServiceAdapter.java [new file with mode: 0644]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDataTreeChangeListenerRegistration.java [new file with mode: 0644]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingStructuralType.java [new file with mode: 0644]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingToNormalizedNodeCodec.java
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDataObjectModification.java [new file with mode: 0644]
opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDataTreeModification.java [new file with mode: 0644]
opendaylight/md-sal/sal-binding-broker/src/main/yang/opendaylight-binding-broker-impl.yang
opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/DataTreeChangeListenerTest.java [new file with mode: 0644]
opendaylight/md-sal/sal-binding-config/src/main/yang/opendaylight-md-sal-binding.yang
opendaylight/md-sal/sal-binding-it/src/main/java/org/opendaylight/controller/test/sal/binding/it/TestHelper.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DelegatingPersistentDataProvider.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/NonPersistentDataProvider.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/PersistentDataProvider.java [new file with mode: 0644]
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/AbstractUntypedPersistentActor.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/MeteredBoundedMailbox.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/common/actor/MeteringBehavior.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/datastore/node/utils/stream/NormalizedNodeOutputStreamWriter.java
opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/reporting/MetricsReporter.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractThreePhaseCommitCohort.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractTransactionContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ChainedTransactionProxy.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerSupport.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerActor.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxy.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerRegistrationActor.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerSupport.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DatastoreContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedDataTreeListenerRegistration.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedListenerRegistration.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelegateFactory.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DistributedDataStore.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ForwardingDataTreeChangeListener.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LeaderLocalDelegateFactory.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpDOMStoreThreePhaseCommitCohort.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpTransactionContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/Shard.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ShardManager.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ThreePhaseCommitCohortProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionChainProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContext.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionContextImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionOperation.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxy.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxyCleanupPhantomReference.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionRateLimitingCallback.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/WriteOnlyTransactionContextImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/compat/PreLithiumTransactionContextImpl.java
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/DatastoreInfoMXBean.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/DatastoreInfoMXBeanImpl.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeChangeListenerRegistration.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeChangeListenerRegistrationReply.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChanged.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChangedReply.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RegisterDataTreeChangeListener.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RegisterDataTreeChangeListenerReply.java [new file with mode: 0644]
opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/utils/ActorContext.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/ShardTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionProxyTest.java
opendaylight/md-sal/sal-distributed-datastore/src/test/java/org/opendaylight/controller/cluster/datastore/TransactionRateLimitingCommitCallbackTest.java
opendaylight/md-sal/sal-dom-api/src/main/java/org/opendaylight/controller/md/sal/dom/api/DOMDataTreeIdentifier.java
opendaylight/md-sal/sal-dom-broker/src/main/java/org/opendaylight/controller/md/sal/dom/broker/impl/DOMNotificationRouter.java
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/AbstractDOMStoreTransaction.java [moved from opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/AbstractDOMStoreTransaction.java with 55% similarity]
opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/ForwardingDOMStoreThreePhaseCommitCohort.java [new file with mode: 0644]
opendaylight/md-sal/sal-dom-xsql-config/src/main/resources/04-xsql.xml
opendaylight/md-sal/sal-dom-xsql/src/main/java/org/odl/xsql/JDBCDriver.java
opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/controller/md/sal/dom/xsql/TablesResultSet.java
opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/controller/md/sal/dom/xsql/XSQLAdapter.java
opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/controller/md/sal/dom/xsql/XSQLBluePrint.java
opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/controller/md/sal/dom/xsql/XSQLBluePrintNode.java
opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/controller/md/sal/dom/xsql/XSQLODLUtils.java
opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/controller/md/sal/dom/xsql/jdbc/JDBCResultSet.java
opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/controller/md/sal/dom/xsql/jdbc/JDBCServer.java
opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/xsql/XSQLProvider.java
opendaylight/md-sal/sal-dom-xsql/src/main/java/org/opendaylight/yang/gen/v1/http/netconfcentral/org/ns/xsql/rev140626/XSQLModule.java
opendaylight/md-sal/sal-dom-xsql/src/main/resources/04-xsql.xml
opendaylight/md-sal/sal-dom-xsql/src/main/yang/XSQL.yang
opendaylight/md-sal/sal-dom-xsql/src/test/java/org/opendaylight/xsql/test/XSQLTest.java
opendaylight/md-sal/sal-dom-xsql/src/test/resources/BluePrintCache.dat
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/ChainedTransactionCommitImpl.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMDataStore.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/InMemoryDOMStoreTreeChangePublisher.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SimpleDataTreeCandidate.java [deleted file]
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedReadTransaction.java
opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SnapshotBackedWriteTransaction.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/NetconfDevice.java
opendaylight/md-sal/sal-netconf-connector/src/main/java/org/opendaylight/controller/sal/connect/netconf/NetconfStateSchemas.java
opendaylight/md-sal/sal-netconf-connector/src/test/java/org/opendaylight/controller/sal/connect/netconf/NetconfDeviceTest.java
opendaylight/md-sal/sal-rest-docgen/src/main/resources/WEB-INF/web.xml
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/operations/editconfig/EditConfig.java
opendaylight/netconf/config-netconf-connector/src/main/java/org/opendaylight/controller/netconf/confignetconfconnector/osgi/YangStoreSnapshot.java
opendaylight/netconf/mdsal-netconf-connector/src/main/java/org/opendaylight/controller/config/yang/netconf/mdsal/mapper/NetconfMdsalMapperModule.java
opendaylight/netconf/mdsal-netconf-connector/src/main/java/org/opendaylight/controller/netconf/mdsal/connector/MdsalNetconfOperationService.java
opendaylight/netconf/mdsal-netconf-connector/src/main/java/org/opendaylight/controller/netconf/mdsal/connector/MdsalNetconfOperationServiceFactory.java
opendaylight/netconf/mdsal-netconf-connector/src/main/java/org/opendaylight/controller/netconf/mdsal/connector/OperationProvider.java
opendaylight/netconf/mdsal-netconf-connector/src/main/java/org/opendaylight/controller/netconf/mdsal/connector/ops/Commit.java
opendaylight/netconf/mdsal-netconf-connector/src/main/java/org/opendaylight/controller/netconf/mdsal/connector/ops/DiscardChanges.java
opendaylight/netconf/mdsal-netconf-connector/src/main/java/org/opendaylight/controller/netconf/mdsal/connector/ops/EditConfig.java
opendaylight/netconf/mdsal-netconf-connector/src/main/java/org/opendaylight/controller/netconf/mdsal/connector/ops/Lock.java
opendaylight/netconf/mdsal-netconf-connector/src/main/java/org/opendaylight/controller/netconf/mdsal/connector/ops/RuntimeRpc.java [new file with mode: 0644]
opendaylight/netconf/mdsal-netconf-connector/src/main/java/org/opendaylight/controller/netconf/mdsal/connector/ops/Unlock.java
opendaylight/netconf/mdsal-netconf-connector/src/main/java/org/opendaylight/controller/netconf/mdsal/connector/ops/get/AbstractGet.java
opendaylight/netconf/mdsal-netconf-connector/src/main/yang/netconf-mdsal-mapper.yang
opendaylight/netconf/mdsal-netconf-connector/src/test/java/org/opendaylight/controller/netconf/mdsal/connector/ops/RuntimeRpcTest.java [new file with mode: 0644]
opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/rpc-nonvoid-control.xml [new file with mode: 0644]
opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/rpc-nonvoid.xml [new file with mode: 0644]
opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/rpc-void-input-output.xml [new file with mode: 0644]
opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/rpc-void-output.xml [new file with mode: 0644]
opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/runtimerpc-ok-reply.xml [new file with mode: 0644]
opendaylight/netconf/mdsal-netconf-connector/src/test/resources/yang/mdsal-netconf-rpc-test.yang [new file with mode: 0644]
opendaylight/netconf/netconf-client/src/main/java/org/opendaylight/controller/netconf/client/NetconfClientSessionNegotiator.java
opendaylight/netconf/netconf-mdsal-config/src/main/resources/initial/08-netconf-mdsal.xml
opendaylight/netconf/netconf-monitoring/src/main/java/org/opendaylight/controller/netconf/monitoring/GetSchema.java
opendaylight/netconf/netconf-netty-util/src/main/java/org/opendaylight/controller/netconf/nettyutil/handler/ssh/client/AsyncSshHandler.java
opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/mapping/AbstractSingletonNetconfOperation.java
opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/messages/NetconfHelloMessage.java
opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/messages/NetconfMessageUtil.java
opendaylight/netconf/netconf-util/src/main/java/org/opendaylight/controller/netconf/util/osgi/NetconfConfigUtil.java
opendaylight/netconf/netconf-util/src/test/java/org/opendaylight/controller/netconf/util/osgi/NetconfConfigUtilTest.java

index 021ed1d5689a20d9fe1180b9b6e3fe9fb4e447c9..f8bbfeca49ad69d34d72fbf459a64d71e4892c22 100644 (file)
@@ -11,7 +11,7 @@
     <repository>mvn:org.opendaylight.controller/features-akka/${commons.opendaylight.version}/xml/features</repository>
     <feature name='odl-mdsal-all' version='${project.version}' description="OpenDaylight :: MDSAL :: All">
         <feature version='${project.version}'>odl-mdsal-broker</feature>
-        <feature version='${project.version}'>odl-mdsal-clustering</feature>
+        <feature version='${project.version}'>odl-mdsal-broker-local</feature>
         <feature version='${project.version}'>odl-mdsal-xsql</feature>
         <feature version='${project.version}'>odl-toaster</feature>
     </feature>
@@ -41,7 +41,7 @@
         <configfile finalname='${config.configfile.directory}/${config.netconf.mdsal.configfile}'>mvn:org.opendaylight.controller/netconf-mdsal-config/${netconf.version}/xml/config</configfile>
     </feature>
 
-    <feature name='odl-mdsal-broker' version='${project.version}' description="OpenDaylight :: MDSAL :: Broker">
+    <feature name='odl-mdsal-broker-local' version='${project.version}' description="OpenDaylight :: MDSAL :: Broker">
         <feature version='${yangtools.version}'>odl-yangtools-common</feature>
         <feature version='${yangtools.version}'>odl-yangtools-binding</feature>
         <feature version='${yangtools.version}'>odl-yangtools-models</feature>
@@ -76,7 +76,7 @@
         <configfile finalname="${config.configfile.directory}/${config.xsql.configfile}">mvn:org.opendaylight.controller/sal-dom-xsql-config/${project.version}/xml/config</configfile>
     </feature>
     <feature name ='odl-mdsal-clustering-commons' version='${project.version}'>
-        <feature version='${project.version}'>odl-mdsal-broker</feature>
+        <feature version='${project.version}'>odl-mdsal-broker-local</feature>
         <feature version='${akka.version}'>odl-akka-system</feature>
         <feature version='${akka.version}'>odl-akka-persistence</feature>
         <bundle>mvn:org.opendaylight.controller/sal-clustering-commons/${project.version}</bundle>
         <bundle>mvn:com.codahale.metrics/metrics-core/3.0.1</bundle>
     </feature>
     <feature name ='odl-mdsal-distributed-datastore' version='${project.version}'>
-        <feature version='${project.version}'>odl-mdsal-broker</feature>
+        <feature version='${project.version}'>odl-mdsal-broker-local</feature>
         <feature version='${project.version}'>odl-mdsal-clustering-commons</feature>
         <feature version='${akka.version}'>odl-akka-clustering</feature>
         <bundle>mvn:org.opendaylight.controller/sal-distributed-datastore/${project.version}</bundle>
     </feature>
     <feature name ='odl-mdsal-remoterpc-connector' version='${project.version}'>
-        <feature version='${project.version}'>odl-mdsal-broker</feature>
+        <feature version='${project.version}'>odl-mdsal-broker-local</feature>
         <feature version='${project.version}'>odl-mdsal-clustering-commons</feature>
         <feature version='${akka.version}'>odl-akka-clustering</feature>
         <feature version='0.7'>odl-akka-leveldb</feature>
         <bundle>mvn:org.opendaylight.controller/sal-remoterpc-connector/${project.version}</bundle>
     </feature>
-    <feature name ='odl-mdsal-clustering' version='${project.version}'>
+    <feature name ='odl-mdsal-broker' version='${project.version}'>
         <feature version='${project.version}'>odl-mdsal-remoterpc-connector</feature>
         <feature version='${project.version}'>odl-mdsal-distributed-datastore</feature>
         <configfile finalname="${config.configfile.directory}/${config.clustering.configfile}">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/config</configfile>
         <configfile finalname="configuration/initial/modules.conf">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/xml/moduleconf</configfile>
         <configfile finalname="etc/org.opendaylight.controller.cluster.datastore.cfg">mvn:org.opendaylight.controller/sal-clustering-config/${project.version}/cfg/datastore</configfile>
     </feature>
-
+    <feature name ='odl-mdsal-clustering' version='${project.version}'>
+      <feature version='${project.version}'>odl-mdsal-broker</feature>
+    </feature>
     <feature name='odl-clustering-test-app' version='${project.version}'>
         <feature version='${yangtools.version}'>odl-yangtools-models</feature>
-        <feature version='${project.version}'>odl-mdsal-broker</feature>
+        <feature version='${project.version}'>odl-mdsal-broker-local</feature>
         <bundle>mvn:org.opendaylight.controller.samples/clustering-it-model/${project.version}</bundle>
         <bundle>mvn:org.opendaylight.controller.samples/clustering-it-provider/${project.version}</bundle>
         <configfile finalname="${config.configfile.directory}/20-clustering-test-app.xml">mvn:org.opendaylight.controller.samples/clustering-it-config/${project.version}/xml/config</configfile>
index c5adb28db745ce6c676f21ccf3f10c4edd43f933..4afbedb76e44ada6766aa119c3b63c8f773d25ff 100644 (file)
@@ -70,6 +70,13 @@ and is available at http://www.eclipse.org/legal/epl-v10.html INTERNAL
       <artifactId>${artifactId}-impl</artifactId>
       <version>${symbol_dollar}{project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>${symbol_dollar}{project.groupId}</groupId>
+      <artifactId>${artifactId}-impl</artifactId>
+      <version>${symbol_dollar}{project.version}</version>
+      <type>xml</type>
+      <classifier>config</classifier>
+    </dependency>
     <dependency>
       <groupId>${symbol_dollar}{project.groupId}</groupId>
       <artifactId>${artifactId}-api</artifactId>
index 5ef6a245ecf24ec5a673567f42a89329f50d8808..71c4850748ed968dc689a70ecb0650bede42ff45 100644 (file)
                             <provider>/modules/module[type='runtime-generated-mapping'][name='runtime-mapping-singleton']</provider>
                         </instance>
                     </service>
+                    <service>
+                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-normalized-node-serializer</type>
+                        <instance>
+                            <name>runtime-mapping-singleton</name>
+                            <provider>/modules/module[type='runtime-generated-mapping'][name='runtime-mapping-singleton']</provider>
+                        </instance>
+                    </service>
                     <service>
                         <type xmlns:binding-impl="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding:impl">binding-impl:binding-new-notification-service</type>
                         <instance>
index c5ae4c41b2f4822a04ca9da300171ae2e618d52e..ed19f21dedb8d5900a0df35aa31fc20ce6b8aee4 100644 (file)
@@ -19,7 +19,6 @@ import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 import java.util.HashMap;
 import java.util.Map;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.example.messages.KeyValue;
 import org.opendaylight.controller.cluster.example.messages.KeyValueSaved;
 import org.opendaylight.controller.cluster.example.messages.PrintRole;
@@ -38,7 +37,6 @@ import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payloa
 public class ExampleActor extends RaftActor {
 
     private final Map<String, String> state = new HashMap();
-    private final DataPersistenceProvider dataPersistenceProvider;
 
     private long persistIdentifier = 1;
     private final Optional<ActorRef> roleChangeNotifier;
@@ -47,7 +45,7 @@ public class ExampleActor extends RaftActor {
     public ExampleActor(String id, Map<String, String> peerAddresses,
         Optional<ConfigParams> configParams) {
         super(id, peerAddresses, configParams);
-        this.dataPersistenceProvider = new PersistentDataProvider();
+        setPersistence(true);
         roleChangeNotifier = createRoleChangeNotifier(id);
     }
 
@@ -185,11 +183,6 @@ public class ExampleActor extends RaftActor {
 
     }
 
-    @Override
-    protected DataPersistenceProvider persistence() {
-        return dataPersistenceProvider;
-    }
-
     @Override public void onReceiveRecover(Object message)throws Exception {
         super.onReceiveRecover(message);
     }
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ElectionTermImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ElectionTermImpl.java
new file mode 100644 (file)
index 0000000..a22e57b
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
+import org.slf4j.Logger;
+
+/**
+ * Implementation of ElectionTerm for the RaftActor.
+ */
+class ElectionTermImpl implements ElectionTerm {
+    /**
+     * Identifier of the actor whose election term information this is
+     */
+    private long currentTerm = 0;
+    private String votedFor = null;
+
+    private final DataPersistenceProvider persistence;
+
+    private final Logger log;
+    private final String logId;
+
+    ElectionTermImpl(DataPersistenceProvider persistence, String logId, Logger log) {
+        this.persistence = persistence;
+        this.logId = logId;
+        this.log = log;
+    }
+
+    @Override
+    public long getCurrentTerm() {
+        return currentTerm;
+    }
+
+    @Override
+    public String getVotedFor() {
+        return votedFor;
+    }
+
+    @Override public void update(long currentTerm, String votedFor) {
+        if(log.isDebugEnabled()) {
+            log.debug("{}: Set currentTerm={}, votedFor={}", logId, currentTerm, votedFor);
+        }
+        this.currentTerm = currentTerm;
+        this.votedFor = votedFor;
+    }
+
+    @Override
+    public void updateAndPersist(long currentTerm, String votedFor){
+        update(currentTerm, votedFor);
+        // FIXME : Maybe first persist then update the state
+        persistence.persist(new UpdateElectionTerm(this.currentTerm, this.votedFor), NoopProcedure.instance());
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/NoopProcedure.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/NoopProcedure.java
new file mode 100644 (file)
index 0000000..c1267fa
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.japi.Procedure;
+
+/**
+ * An akka Procedure that does nothing.
+ *
+ * @author Thomas Pantelis
+ */
+public class NoopProcedure<T> implements Procedure<T> {
+
+    private static final NoopProcedure<Object> INSTANCE = new NoopProcedure<>();
+
+    private NoopProcedure() {
+    }
+
+    @SuppressWarnings("unchecked")
+    public static <T> NoopProcedure<T> instance() {
+        return (NoopProcedure<T>) INSTANCE;
+    }
+
+    @Override
+    public void apply(Object notUsed) {
+    }
+}
index b74259d4851153659df0c2866f6323b9234eff06..1c30fe23175b5af62ffe808d9428c3361688a4b0 100644 (file)
@@ -29,6 +29,9 @@ import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import org.apache.commons.lang3.time.DurationFormatUtils;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider;
+import org.opendaylight.controller.cluster.NonPersistentDataProvider;
+import org.opendaylight.controller.cluster.PersistentDataProvider;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActor;
 import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
 import org.opendaylight.controller.cluster.notifications.RoleChanged;
@@ -40,7 +43,7 @@ import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshotReply;
 import org.opendaylight.controller.cluster.raft.base.messages.Replicate;
 import org.opendaylight.controller.cluster.raft.behaviors.AbstractLeader;
-import org.opendaylight.controller.cluster.raft.behaviors.AbstractRaftActorBehavior;
+import org.opendaylight.controller.cluster.raft.behaviors.DelegatingRaftActorBehavior;
 import org.opendaylight.controller.cluster.raft.behaviors.Follower;
 import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
 import org.opendaylight.controller.cluster.raft.client.messages.FindLeader;
@@ -96,12 +99,6 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
 
     private static final long APPLY_STATE_DELAY_THRESHOLD_IN_NANOS = TimeUnit.MILLISECONDS.toNanos(50L); // 50 millis
 
-    private static final Procedure<ApplyJournalEntries> APPLY_JOURNAL_ENTRIES_PERSIST_CALLBACK =
-            new Procedure<ApplyJournalEntries>() {
-                @Override
-                public void apply(ApplyJournalEntries param) throws Exception {
-                }
-            };
     private static final String COMMIT_SNAPSHOT = "commit_snapshot";
 
     protected final Logger LOG = LoggerFactory.getLogger(getClass());
@@ -110,7 +107,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
      * The current state determines the current behavior of a RaftActor
      * A Raft Actor always starts off in the Follower State
      */
-    private RaftActorBehavior currentBehavior;
+    private final DelegatingRaftActorBehavior currentBehavior = new DelegatingRaftActorBehavior();
 
     /**
      * This context should NOT be passed directly to any other actor it is
@@ -118,12 +115,9 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
      */
     private final RaftActorContextImpl context;
 
-    private final Procedure<Void> createSnapshotProcedure = new CreateSnapshotProcedure();
+    private final DelegatingPersistentDataProvider delegatingPersistenceProvider = new DelegatingPersistentDataProvider(null);
 
-    /**
-     * The in-memory journal
-     */
-    private ReplicatedLogImpl replicatedLog = new ReplicatedLogImpl();
+    private final Procedure<Void> createSnapshotProcedure = new CreateSnapshotProcedure();
 
     private Stopwatch recoveryTimer;
 
@@ -139,10 +133,11 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
          Optional<ConfigParams> configParams) {
 
         context = new RaftActorContextImpl(this.getSelf(),
-            this.getContext(), id, new ElectionTermImpl(),
-            -1, -1, replicatedLog, peerAddresses,
-            (configParams.isPresent() ? configParams.get(): new DefaultConfigParamsImpl()),
-            LOG);
+            this.getContext(), id, new ElectionTermImpl(delegatingPersistenceProvider, id, LOG),
+            -1, -1, peerAddresses,
+            (configParams.isPresent() ? configParams.get(): new DefaultConfigParamsImpl()), LOG);
+
+        context.setReplicatedLog(ReplicatedLogImpl.newInstance(context, delegatingPersistenceProvider, currentBehavior));
     }
 
     private void initRecoveryTimer() {
@@ -185,7 +180,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
             } else if (message instanceof ApplyJournalEntries) {
                 onRecoveredApplyLogEntries(((ApplyJournalEntries) message).getToIndex());
             } else if (message instanceof DeleteEntries) {
-                replicatedLog.removeFrom(((DeleteEntries) message).getFromIndex());
+                replicatedLog().removeFrom(((DeleteEntries) message).getFromIndex());
             } else if (message instanceof UpdateElectionTerm) {
                 context.getTermInformation().update(((UpdateElectionTerm) message).getCurrentTerm(),
                         ((UpdateElectionTerm) message).getVotedFor());
@@ -220,9 +215,9 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         // Create a replicated log with the snapshot information
         // The replicated log can be used later on to retrieve this snapshot
         // when we need to install it on a peer
-        replicatedLog = new ReplicatedLogImpl(snapshot);
 
-        context.setReplicatedLog(replicatedLog);
+        context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, delegatingPersistenceProvider,
+                currentBehavior));
         context.setLastApplied(snapshot.getLastAppliedIndex());
         context.setCommitIndex(snapshot.getLastAppliedIndex());
 
@@ -233,8 +228,8 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
 
         timer.stop();
         LOG.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size=" +
-                replicatedLog.size(), persistenceId(), timer.toString(),
-                replicatedLog.getSnapshotIndex(), replicatedLog.getSnapshotTerm());
+                replicatedLog().size(), persistenceId(), timer.toString(),
+                replicatedLog().getSnapshotIndex(), replicatedLog().getSnapshotTerm());
     }
 
     private void onRecoveredJournalLogEntry(ReplicatedLogEntry logEntry) {
@@ -242,7 +237,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
             LOG.debug("{}: Received ReplicatedLogEntry for recovery: {}", persistenceId(), logEntry.getIndex());
         }
 
-        replicatedLog.append(logEntry);
+        replicatedLog().append(logEntry);
     }
 
     private void onRecoveredApplyLogEntries(long toIndex) {
@@ -252,7 +247,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         }
 
         for (long i = context.getLastApplied() + 1; i <= toIndex; i++) {
-            batchRecoveredLogEntry(replicatedLog.get(i));
+            batchRecoveredLogEntry(replicatedLog().get(i));
         }
 
         context.setLastApplied(toIndex);
@@ -298,8 +293,8 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
                 "Persistence Id =  " + persistenceId() +
                 " Last index in log={}, snapshotIndex={}, snapshotTerm={}, " +
                 "journal-size={}",
-            replicatedLog.lastIndex(), replicatedLog.getSnapshotIndex(),
-            replicatedLog.getSnapshotTerm(), replicatedLog.size());
+            replicatedLog().lastIndex(), replicatedLog().getSnapshotIndex(),
+            replicatedLog().getSnapshotTerm(), replicatedLog().size());
 
         initializeBehavior();
     }
@@ -309,9 +304,9 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
     }
 
     protected void changeCurrentBehavior(RaftActorBehavior newBehavior){
-        reusableBehaviorStateHolder.init(currentBehavior);
-        currentBehavior = newBehavior;
-        handleBehaviorChange(reusableBehaviorStateHolder, currentBehavior);
+        reusableBehaviorStateHolder.init(getCurrentBehavior());
+        setCurrentBehavior(newBehavior);
+        handleBehaviorChange(reusableBehaviorStateHolder, getCurrentBehavior());
     }
 
     @Override public void handleCommand(Object message) {
@@ -339,7 +334,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
                 LOG.debug("{}: Persisting ApplyLogEntries with index={}", persistenceId(), applyEntries.getToIndex());
             }
 
-            persistence().persist(applyEntries, APPLY_JOURNAL_ENTRIES_PERSIST_CALLBACK);
+            persistence().persist(applyEntries, NoopProcedure.instance());
 
         } else if(message instanceof ApplySnapshot ) {
             Snapshot snapshot = ((ApplySnapshot) message).getSnapshot();
@@ -354,8 +349,8 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
             applySnapshot(snapshot.getState());
 
             //clears the followers log, sets the snapshot index to ensure adjusted-index works
-            replicatedLog = new ReplicatedLogImpl(snapshot);
-            context.setReplicatedLog(replicatedLog);
+            context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context, delegatingPersistenceProvider,
+                    currentBehavior));
             context.setLastApplied(snapshot.getLastAppliedIndex());
 
         } else if (message instanceof FindLeader) {
@@ -392,11 +387,11 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         } else if (message.equals(COMMIT_SNAPSHOT)) {
             commitSnapshot(-1);
         } else {
-            reusableBehaviorStateHolder.init(currentBehavior);
+            reusableBehaviorStateHolder.init(getCurrentBehavior());
 
-            currentBehavior = currentBehavior.handleMessage(getSender(), message);
+            setCurrentBehavior(currentBehavior.handleMessage(getSender(), message));
 
-            handleBehaviorChange(reusableBehaviorStateHolder, currentBehavior);
+            handleBehaviorChange(reusableBehaviorStateHolder, getCurrentBehavior());
         }
     }
 
@@ -406,17 +401,17 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         OnDemandRaftState.Builder builder = OnDemandRaftState.builder()
                 .commitIndex(context.getCommitIndex())
                 .currentTerm(context.getTermInformation().getCurrentTerm())
-                .inMemoryJournalDataSize(replicatedLog.dataSize())
-                .inMemoryJournalLogSize(replicatedLog.size())
+                .inMemoryJournalDataSize(replicatedLog().dataSize())
+                .inMemoryJournalLogSize(replicatedLog().size())
                 .isSnapshotCaptureInitiated(context.getSnapshotManager().isCapturing())
                 .lastApplied(context.getLastApplied())
-                .lastIndex(replicatedLog.lastIndex())
-                .lastTerm(replicatedLog.lastTerm())
+                .lastIndex(replicatedLog().lastIndex())
+                .lastTerm(replicatedLog().lastTerm())
                 .leader(getLeaderId())
                 .raftState(currentBehavior.state().toString())
                 .replicatedToAllIndex(currentBehavior.getReplicatedToAllIndex())
-                .snapshotIndex(replicatedLog.getSnapshotIndex())
-                .snapshotTerm(replicatedLog.getSnapshotTerm())
+                .snapshotIndex(replicatedLog().getSnapshotIndex())
+                .snapshotTerm(replicatedLog().getSnapshotTerm())
                 .votedFor(context.getTermInformation().getVotedFor())
                 .peerAddresses(ImmutableMap.copyOf(context.getPeerAddresses()));
 
@@ -426,8 +421,8 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
             builder.lastLogTerm(lastLogEntry.getTerm());
         }
 
-        if(currentBehavior instanceof AbstractLeader) {
-            AbstractLeader leader = (AbstractLeader)currentBehavior;
+        if(getCurrentBehavior() instanceof AbstractLeader) {
+            AbstractLeader leader = (AbstractLeader)getCurrentBehavior();
             Collection<String> followerIds = leader.getFollowerIds();
             List<FollowerInfo> followerInfoList = Lists.newArrayListWithCapacity(followerIds.size());
             for(String id: followerIds) {
@@ -491,39 +486,49 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
 
         final RaftActorContext raftContext = getRaftActorContext();
 
-        replicatedLog
-                .appendAndPersist(replicatedLogEntry, new Procedure<ReplicatedLogEntry>() {
-                    @Override
-                    public void apply(ReplicatedLogEntry replicatedLogEntry) throws Exception {
-                        if(!hasFollowers()){
-                            // Increment the Commit Index and the Last Applied values
-                            raftContext.setCommitIndex(replicatedLogEntry.getIndex());
-                            raftContext.setLastApplied(replicatedLogEntry.getIndex());
+        replicatedLog().appendAndPersist(replicatedLogEntry, new Procedure<ReplicatedLogEntry>() {
+            @Override
+            public void apply(ReplicatedLogEntry replicatedLogEntry) throws Exception {
+                if(!hasFollowers()){
+                    // Increment the Commit Index and the Last Applied values
+                    raftContext.setCommitIndex(replicatedLogEntry.getIndex());
+                    raftContext.setLastApplied(replicatedLogEntry.getIndex());
 
-                            // Apply the state immediately
-                            applyState(clientActor, identifier, data);
+                    // Apply the state immediately
+                    applyState(clientActor, identifier, data);
 
-                            // Send a ApplyJournalEntries message so that we write the fact that we applied
-                            // the state to durable storage
-                            self().tell(new ApplyJournalEntries(replicatedLogEntry.getIndex()), self());
+                    // Send a ApplyJournalEntries message so that we write the fact that we applied
+                    // the state to durable storage
+                    self().tell(new ApplyJournalEntries(replicatedLogEntry.getIndex()), self());
 
-                            context.getSnapshotManager().trimLog(context.getLastApplied(), currentBehavior);
+                    context.getSnapshotManager().trimLog(context.getLastApplied(), currentBehavior);
 
-                        } else if (clientActor != null) {
-                            // Send message for replication
-                            currentBehavior.handleMessage(getSelf(),
-                                    new Replicate(clientActor, identifier,
-                                            replicatedLogEntry)
-                            );
-                        }
+                } else if (clientActor != null) {
+                    // Send message for replication
+                    currentBehavior.handleMessage(getSelf(),
+                            new Replicate(clientActor, identifier, replicatedLogEntry));
+                }
+            }
+        });
+    }
 
-                    }
-                });    }
+    private ReplicatedLog replicatedLog() {
+        return context.getReplicatedLog();
+    }
 
     protected String getId() {
         return context.getId();
     }
 
+    @VisibleForTesting
+    void setCurrentBehavior(RaftActorBehavior behavior) {
+        currentBehavior.setDelegate(behavior);
+    }
+
+    protected RaftActorBehavior getCurrentBehavior() {
+        return currentBehavior.getDelegate();
+    }
+
     /**
      * Derived actors can call the isLeader method to check if the current
      * RaftActor is the Leader or not
@@ -564,7 +569,7 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
     }
 
     protected ReplicatedLogEntry getLastLogEntry() {
-        return replicatedLog.last();
+        return replicatedLog().last();
     }
 
     protected Long getCurrentTerm(){
@@ -587,6 +592,41 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         context.setConfigParams(configParams);
     }
 
+    public final DataPersistenceProvider persistence() {
+        return delegatingPersistenceProvider.getDelegate();
+    }
+
+    public void setPersistence(DataPersistenceProvider provider) {
+        delegatingPersistenceProvider.setDelegate(provider);
+    }
+
+    protected void setPersistence(boolean persistent) {
+        if(persistent) {
+            setPersistence(new PersistentDataProvider(this));
+        } else {
+            setPersistence(new NonPersistentDataProvider() {
+                /**
+                 * The way snapshotting works is,
+                 * <ol>
+                 * <li> RaftActor calls createSnapshot on the Shard
+                 * <li> Shard sends a CaptureSnapshotReply and RaftActor then calls saveSnapshot
+                 * <li> When saveSnapshot is invoked on the akka-persistence API it uses the SnapshotStore to save
+                 * the snapshot. The SnapshotStore sends SaveSnapshotSuccess or SaveSnapshotFailure. When the
+                 * RaftActor gets SaveSnapshot success it commits the snapshot to the in-memory journal. This
+                 * commitSnapshot is mimicking what is done in SaveSnapshotSuccess.
+                 * </ol>
+                 */
+                @Override
+                public void saveSnapshot(Object o) {
+                    // Make saving Snapshot successful
+                    // Committing the snapshot here would end up calling commit in the creating state which would
+                    // be a state violation. That's why now we send a message to commit the snapshot.
+                    self().tell(COMMIT_SNAPSHOT, self());
+                }
+            });
+        }
+    }
+
     /**
      * setPeerAddress sets the address of a known peer at a later time.
      * <p>
@@ -688,8 +728,6 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
      */
     protected abstract void onStateChanged();
 
-    protected abstract DataPersistenceProvider persistence();
-
     /**
      * Notifier Actor for this RaftActor to notify when a role change happens
      * @return ActorRef - ActorRef of the notifier or Optional.absent if none.
@@ -718,125 +756,11 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
     private void handleCaptureSnapshotReply(byte[] snapshotBytes) {
         LOG.debug("{}: CaptureSnapshotReply received by actor: snapshot size {}", persistenceId(), snapshotBytes.length);
 
-        context.getSnapshotManager().persist(persistence(), snapshotBytes, currentBehavior, getTotalMemory());
-    }
-
-    protected long getTotalMemory() {
-        return Runtime.getRuntime().totalMemory();
+        context.getSnapshotManager().persist(persistence(), snapshotBytes, currentBehavior, context.getTotalMemory());
     }
 
     protected boolean hasFollowers(){
-        return getRaftActorContext().getPeerAddresses().keySet().size() > 0;
-    }
-
-    private class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
-        private static final int DATA_SIZE_DIVIDER = 5;
-        private long dataSizeSinceLastSnapshot = 0L;
-
-
-        public ReplicatedLogImpl(Snapshot snapshot) {
-            super(snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm(),
-                snapshot.getUnAppliedEntries());
-        }
-
-        public ReplicatedLogImpl() {
-            super();
-        }
-
-        @Override public void removeFromAndPersist(long logEntryIndex) {
-            int adjustedIndex = adjustedIndex(logEntryIndex);
-
-            if (adjustedIndex < 0) {
-                return;
-            }
-
-            // FIXME: Maybe this should be done after the command is saved
-            journal.subList(adjustedIndex , journal.size()).clear();
-
-            persistence().persist(new DeleteEntries(adjustedIndex), new Procedure<DeleteEntries>() {
-
-                @Override
-                public void apply(DeleteEntries param)
-                        throws Exception {
-                    //FIXME : Doing nothing for now
-                    dataSize = 0;
-                    for (ReplicatedLogEntry entry : journal) {
-                        dataSize += entry.size();
-                    }
-                }
-            });
-        }
-
-        @Override public void appendAndPersist(
-            final ReplicatedLogEntry replicatedLogEntry) {
-            appendAndPersist(replicatedLogEntry, null);
-        }
-
-        public void appendAndPersist(
-            final ReplicatedLogEntry replicatedLogEntry,
-            final Procedure<ReplicatedLogEntry> callback)  {
-
-            if(LOG.isDebugEnabled()) {
-                LOG.debug("{}: Append log entry and persist {} ", persistenceId(), replicatedLogEntry);
-            }
-
-            // FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability of the logs
-            journal.add(replicatedLogEntry);
-
-            // When persisting events with persist it is guaranteed that the
-            // persistent actor will not receive further commands between the
-            // persist call and the execution(s) of the associated event
-            // handler. This also holds for multiple persist calls in context
-            // of a single command.
-            persistence().persist(replicatedLogEntry,
-                new Procedure<ReplicatedLogEntry>() {
-                    @Override
-                    public void apply(ReplicatedLogEntry evt) throws Exception {
-                        int logEntrySize = replicatedLogEntry.size();
-
-                        dataSize += logEntrySize;
-                        long dataSizeForCheck = dataSize;
-
-                        dataSizeSinceLastSnapshot += logEntrySize;
-
-                        if (!hasFollowers()) {
-                            // When we do not have followers we do not maintain an in-memory log
-                            // due to this the journalSize will never become anything close to the
-                            // snapshot batch count. In fact will mostly be 1.
-                            // Similarly since the journal's dataSize depends on the entries in the
-                            // journal the journal's dataSize will never reach a value close to the
-                            // memory threshold.
-                            // By maintaining the dataSize outside the journal we are tracking essentially
-                            // what we have written to the disk however since we no longer are in
-                            // need of doing a snapshot just for the sake of freeing up memory we adjust
-                            // the real size of data by the DATA_SIZE_DIVIDER so that we do not snapshot as often
-                            // as if we were maintaining a real snapshot
-                            dataSizeForCheck = dataSizeSinceLastSnapshot / DATA_SIZE_DIVIDER;
-                        }
-                        long journalSize = replicatedLogEntry.getIndex() + 1;
-                        long dataThreshold = getTotalMemory() *
-                                context.getConfigParams().getSnapshotDataThresholdPercentage() / 100;
-
-                        if ((journalSize % context.getConfigParams().getSnapshotBatchCount() == 0
-                                || dataSizeForCheck > dataThreshold)) {
-
-                            boolean started = context.getSnapshotManager().capture(replicatedLogEntry,
-                                    currentBehavior.getReplicatedToAllIndex());
-
-                            if(started){
-                                dataSizeSinceLastSnapshot = 0;
-                            }
-
-                        }
-
-                        if (callback != null){
-                            callback.apply(replicatedLogEntry);
-                        }
-                    }
-                }
-            );
-        }
-
+        return getRaftActorContext().hasFollowers();
     }
 
     static class DeleteEntries implements Serializable {
@@ -852,46 +776,6 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         }
     }
 
-
-    private class ElectionTermImpl implements ElectionTerm {
-        /**
-         * Identifier of the actor whose election term information this is
-         */
-        private long currentTerm = 0;
-        private String votedFor = null;
-
-        @Override
-        public long getCurrentTerm() {
-            return currentTerm;
-        }
-
-        @Override
-        public String getVotedFor() {
-            return votedFor;
-        }
-
-        @Override public void update(long currentTerm, String votedFor) {
-            if(LOG.isDebugEnabled()) {
-                LOG.debug("{}: Set currentTerm={}, votedFor={}", persistenceId(), currentTerm, votedFor);
-            }
-            this.currentTerm = currentTerm;
-            this.votedFor = votedFor;
-        }
-
-        @Override
-        public void updateAndPersist(long currentTerm, String votedFor){
-            update(currentTerm, votedFor);
-            // FIXME : Maybe first persist then update the state
-            persistence().persist(new UpdateElectionTerm(this.currentTerm, this.votedFor), new Procedure<UpdateElectionTerm>(){
-
-                @Override public void apply(UpdateElectionTerm param)
-                    throws Exception {
-
-                }
-            });
-        }
-    }
-
     static class UpdateElectionTerm implements Serializable {
         private static final long serialVersionUID = 1L;
         private final long currentTerm;
@@ -911,34 +795,6 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         }
     }
 
-    protected class NonPersistentRaftDataProvider extends NonPersistentDataProvider {
-
-        public NonPersistentRaftDataProvider(){
-
-        }
-
-        /**
-         * The way snapshotting works is,
-         * <ol>
-         * <li> RaftActor calls createSnapshot on the Shard
-         * <li> Shard sends a CaptureSnapshotReply and RaftActor then calls saveSnapshot
-         * <li> When saveSnapshot is invoked on the akka-persistence API it uses the SnapshotStore to save the snapshot.
-         * The SnapshotStore sends SaveSnapshotSuccess or SaveSnapshotFailure. When the RaftActor gets SaveSnapshot
-         * success it commits the snapshot to the in-memory journal. This commitSnapshot is mimicking what is done
-         * in SaveSnapshotSuccess.
-         * </ol>
-         * @param o
-         */
-        @Override
-        public void saveSnapshot(Object o) {
-            // Make saving Snapshot successful
-            // Committing the snapshot here would end up calling commit in the creating state which would
-            // be a state violation. That's why now we send a message to commit the snapshot.
-            self().tell(COMMIT_SNAPSHOT, self());
-        }
-    }
-
-
     private class CreateSnapshotProcedure implements Procedure<Void> {
 
         @Override
@@ -947,15 +803,6 @@ public abstract class RaftActor extends AbstractUntypedPersistentActor {
         }
     }
 
-    @VisibleForTesting
-    void setCurrentBehavior(AbstractRaftActorBehavior behavior) {
-        currentBehavior = behavior;
-    }
-
-    protected RaftActorBehavior getCurrentBehavior() {
-        return currentBehavior;
-    }
-
     private static class BehaviorStateHolder {
         private RaftActorBehavior behavior;
         private String leaderId;
index 2e7eb5eb3aaf221e2889b412e0942d7ea71c241c..9f4b7cb4826b7d7be8eafc4eeec6503613204ad3 100644 (file)
@@ -12,6 +12,8 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
 import akka.actor.Props;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Supplier;
 import java.util.Map;
 import org.slf4j.Logger;
 
@@ -168,4 +170,11 @@ public interface RaftActorContext {
 
     SnapshotManager getSnapshotManager();
 
+    boolean hasFollowers();
+
+    long getTotalMemory();
+
+    @VisibleForTesting
+    void setTotalMemoryRetriever(Supplier<Long> retriever);
+
 }
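
The new RaftActorContext methods let tests substitute the JVM memory reading that drives the snapshot data-size threshold. A minimal sketch of how a test might use setTotalMemoryRetriever(), mirroring the TestRaftActor change later in this patch; the helper class name is hypothetical and the sketch is illustrative only:

import com.google.common.base.Supplier;

// Hypothetical test helper: stubs the total-memory value returned by
// RaftActorContext.getTotalMemory(). Passing a non-positive value restores
// the default Runtime.getRuntime().totalMemory() behaviour.
final class TotalMemoryStub {
    static void install(final RaftActorContext context, final long mockTotalMemory) {
        if (mockTotalMemory > 0) {
            context.setTotalMemoryRetriever(new Supplier<Long>() {
                @Override
                public Long get() {
                    return mockTotalMemory;
                }
            });
        } else {
            context.setTotalMemoryRetriever(null);
        }
    }
}
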
index eb059d60fbee1c89d6a309e5cfaedbf5bc608c64..684845c27023185da3ad386771bcba6381b302d0 100644 (file)
@@ -14,6 +14,8 @@ import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
 import akka.actor.Props;
 import akka.actor.UntypedActorContext;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Supplier;
 import java.util.Map;
 import org.slf4j.Logger;
 
@@ -39,25 +41,22 @@ public class RaftActorContextImpl implements RaftActorContext {
 
     private ConfigParams configParams;
 
-    private boolean snapshotCaptureInitiated;
+    @VisibleForTesting
+    private Supplier<Long> totalMemoryRetriever;
 
     // Snapshot manager will need to be created on demand as it needs raft actor context which cannot
     // be passed to it in the constructor
     private SnapshotManager snapshotManager;
 
-    public RaftActorContextImpl(ActorRef actor, UntypedActorContext context,
-        String id,
-        ElectionTerm termInformation, long commitIndex,
-        long lastApplied, ReplicatedLog replicatedLog,
-        Map<String, String> peerAddresses, ConfigParams configParams,
-        Logger logger) {
+    public RaftActorContextImpl(ActorRef actor, UntypedActorContext context, String id,
+            ElectionTerm termInformation, long commitIndex, long lastApplied, Map<String, String> peerAddresses,
+            ConfigParams configParams, Logger logger) {
         this.actor = actor;
         this.context = context;
         this.id = id;
         this.termInformation = termInformation;
         this.commitIndex = commitIndex;
         this.lastApplied = lastApplied;
-        this.replicatedLog = replicatedLog;
         this.peerAddresses = peerAddresses;
         this.configParams = configParams;
         this.LOG = logger;
@@ -161,10 +160,26 @@ public class RaftActorContextImpl implements RaftActorContext {
         peerAddresses.put(peerId, peerAddress);
     }
 
+    @Override
     public SnapshotManager getSnapshotManager() {
         if(snapshotManager == null){
             snapshotManager = new SnapshotManager(this, LOG);
         }
         return snapshotManager;
     }
+
+    @Override
+    public long getTotalMemory() {
+        return totalMemoryRetriever != null ? totalMemoryRetriever.get() : Runtime.getRuntime().totalMemory();
+    }
+
+    @Override
+    public void setTotalMemoryRetriever(Supplier<Long> retriever) {
+        totalMemoryRetriever = retriever;
+    }
+
+    @Override
+    public boolean hasFollowers() {
+        return getPeerAddresses().keySet().size() > 0;
+    }
 }
index 82d0839bee772bd8efba88a8d1392ab1d336ff1c..3e4d727c7162a45fc32c942561d2599b61a45075 100644 (file)
@@ -8,6 +8,7 @@
 
 package org.opendaylight.controller.cluster.raft;
 
+import akka.japi.Procedure;
 import java.util.List;
 
 /**
@@ -85,6 +86,8 @@ public interface ReplicatedLog {
      */
     void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry);
 
+    void appendAndPersist(ReplicatedLogEntry replicatedLogEntry, Procedure<ReplicatedLogEntry> callback);
+
     /**
      *
      * @param index the index of the log entry
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImpl.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/ReplicatedLogImpl.java
new file mode 100644 (file)
index 0000000..fdb6305
--- /dev/null
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft;
+
+import akka.japi.Procedure;
+import java.util.Collections;
+import java.util.List;
+import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.RaftActor.DeleteEntries;
+import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
+
+/**
+ * Implementation of ReplicatedLog used by the RaftActor.
+ */
+class ReplicatedLogImpl extends AbstractReplicatedLogImpl {
+    private static final int DATA_SIZE_DIVIDER = 5;
+
+    private long dataSizeSinceLastSnapshot = 0L;
+    private final RaftActorContext context;
+    private final DataPersistenceProvider persistence;
+    private final RaftActorBehavior currentBehavior;
+
+    private final Procedure<DeleteEntries> deleteProcedure = new Procedure<DeleteEntries>() {
+        @Override
+        public void apply(DeleteEntries param) {
+            dataSize = 0;
+            for (ReplicatedLogEntry entry : journal) {
+                dataSize += entry.size();
+            }
+        }
+    };
+
+    static ReplicatedLog newInstance(Snapshot snapshot, RaftActorContext context,
+            DataPersistenceProvider persistence, RaftActorBehavior currentBehavior) {
+        return new ReplicatedLogImpl(snapshot.getLastAppliedIndex(), snapshot.getLastAppliedTerm(),
+                snapshot.getUnAppliedEntries(), context, persistence, currentBehavior);
+    }
+
+    static ReplicatedLog newInstance(RaftActorContext context,
+            DataPersistenceProvider persistence, RaftActorBehavior currentBehavior) {
+        return new ReplicatedLogImpl(-1L, -1L, Collections.<ReplicatedLogEntry>emptyList(), context,
+                persistence, currentBehavior);
+    }
+
+    private ReplicatedLogImpl(long snapshotIndex, long snapshotTerm, List<ReplicatedLogEntry> unAppliedEntries,
+            RaftActorContext context, DataPersistenceProvider persistence, RaftActorBehavior currentBehavior) {
+        super(snapshotIndex, snapshotTerm, unAppliedEntries);
+        this.context = context;
+        this.persistence = persistence;
+        this.currentBehavior = currentBehavior;
+    }
+
+    @Override
+    public void removeFromAndPersist(long logEntryIndex) {
+        int adjustedIndex = adjustedIndex(logEntryIndex);
+
+        if (adjustedIndex < 0) {
+            return;
+        }
+
+        // FIXME: Maybe this should be done after the command is saved
+        journal.subList(adjustedIndex, journal.size()).clear();
+
+        persistence.persist(new DeleteEntries(adjustedIndex), deleteProcedure);
+    }
+
+    @Override
+    public void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry) {
+        appendAndPersist(replicatedLogEntry, null);
+    }
+
+    @Override
+    public void appendAndPersist(final ReplicatedLogEntry replicatedLogEntry,
+            final Procedure<ReplicatedLogEntry> callback)  {
+
+        if(context.getLogger().isDebugEnabled()) {
+            context.getLogger().debug("{}: Append log entry and persist {} ", context.getId(), replicatedLogEntry);
+        }
+
+        // FIXME : By adding the replicated log entry to the in-memory journal we are not truly ensuring durability of the logs
+        journal.add(replicatedLogEntry);
+
+        // When persisting events with persist it is guaranteed that the
+        // persistent actor will not receive further commands between the
+        // persist call and the execution(s) of the associated event
+        // handler. This also holds for multiple persist calls in context
+        // of a single command.
+        persistence.persist(replicatedLogEntry,
+            new Procedure<ReplicatedLogEntry>() {
+                @Override
+                public void apply(ReplicatedLogEntry evt) throws Exception {
+                    int logEntrySize = replicatedLogEntry.size();
+
+                    dataSize += logEntrySize;
+                    long dataSizeForCheck = dataSize;
+
+                    dataSizeSinceLastSnapshot += logEntrySize;
+
+                    if (!context.hasFollowers()) {
+                        // When we do not have followers we do not maintain an in-memory log,
+                        // so the journal size never comes anywhere near the snapshot batch
+                        // count; in fact it will mostly be 1. Similarly, since the journal's
+                        // dataSize depends on the entries it holds, it will never reach a
+                        // value close to the memory threshold.
+                        // By maintaining dataSizeSinceLastSnapshot outside the journal we are
+                        // essentially tracking what has been written to disk. Since we no
+                        // longer need to snapshot just to free up memory, we divide the real
+                        // data size by DATA_SIZE_DIVIDER so that we do not snapshot as often
+                        // as we would if we were maintaining a real in-memory log.
+                        dataSizeForCheck = dataSizeSinceLastSnapshot / DATA_SIZE_DIVIDER;
+                    }
+                    long journalSize = replicatedLogEntry.getIndex() + 1;
+                    long dataThreshold = context.getTotalMemory() *
+                            context.getConfigParams().getSnapshotDataThresholdPercentage() / 100;
+
+                    if (journalSize % context.getConfigParams().getSnapshotBatchCount() == 0
+                            || dataSizeForCheck > dataThreshold) {
+
+                        boolean started = context.getSnapshotManager().capture(replicatedLogEntry,
+                                currentBehavior.getReplicatedToAllIndex());
+
+                        if(started){
+                            dataSizeSinceLastSnapshot = 0;
+                        }
+                    }
+
+                    if (callback != null){
+                        callback.apply(replicatedLogEntry);
+                    }
+                }
+            }
+        );
+    }
+}
\ No newline at end of file
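
The appendAndPersist() callback above decides whether to start a snapshot by combining a journal-size check with a memory-based data threshold. A minimal restatement of that arithmetic, using hypothetical names and numbers, to make the condition easier to follow:

// Illustrative sketch only; not part of ReplicatedLogImpl.
final class SnapshotTriggerSketch {
    static boolean shouldCapture(long journalSize, long dataSizeForCheck,
            long totalMemory, long snapshotBatchCount, long thresholdPercentage) {
        // e.g. totalMemory = 512 MiB with thresholdPercentage = 12 gives a
        // dataThreshold of roughly 61 MiB
        long dataThreshold = totalMemory * thresholdPercentage / 100;
        return journalSize % snapshotBatchCount == 0 || dataSizeForCheck > dataThreshold;
    }
}
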
index 432d678491e96a5509502265014ea755bc7e8eee..8121f75191e624cfd8595cd727985c73e1f3d5e7 100644 (file)
@@ -366,7 +366,7 @@ public class SnapshotManager implements SnapshotState {
         long getTerm();
     }
 
-    private static class LastAppliedTermInformationReader implements TermInformationReader{
+    static class LastAppliedTermInformationReader implements TermInformationReader{
         private long index;
         private long term;
 
diff --git a/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/DelegatingRaftActorBehavior.java b/opendaylight/md-sal/sal-akka-raft/src/main/java/org/opendaylight/controller/cluster/raft/behaviors/DelegatingRaftActorBehavior.java
new file mode 100644 (file)
index 0000000..776dae7
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.raft.behaviors;
+
+import akka.actor.ActorRef;
+import org.opendaylight.controller.cluster.raft.RaftState;
+
+/**
+ * A RaftActorBehavior implementation that delegates to another implementation.
+ *
+ * @author Thomas Pantelis
+ */
+public class DelegatingRaftActorBehavior implements RaftActorBehavior {
+    private RaftActorBehavior delegate;
+
+    public RaftActorBehavior getDelegate() {
+        return delegate;
+    }
+
+    public void setDelegate(RaftActorBehavior delegate) {
+        this.delegate = delegate;
+    }
+
+    @Override
+    public void close() throws Exception {
+        delegate.close();
+    }
+
+    @Override
+    public RaftActorBehavior handleMessage(ActorRef sender, Object message) {
+        return delegate.handleMessage(sender, message);
+    }
+
+    @Override
+    public RaftState state() {
+        return delegate.state();
+    }
+
+    @Override
+    public String getLeaderId() {
+        return delegate.getLeaderId();
+    }
+
+    @Override
+    public void setReplicatedToAllIndex(long replicatedToAllIndex) {
+        delegate.setReplicatedToAllIndex(replicatedToAllIndex);
+    }
+
+    @Override
+    public long getReplicatedToAllIndex() {
+        return delegate.getReplicatedToAllIndex();
+    }
+}
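
DelegatingRaftActorBehavior gives the RaftActor a single, stable behavior reference whose concrete delegate (Follower, Candidate or Leader) can be swapped as the election state changes. A minimal sketch of such a swap, using only the methods declared above; the helper and the way the next behavior is obtained are hypothetical:

// Hypothetical helper showing a delegate swap.
final class BehaviorSwitchSketch {
    static void switchBehavior(final DelegatingRaftActorBehavior current,
            final RaftActorBehavior next) throws Exception {
        final RaftActorBehavior previous = current.getDelegate();
        current.setDelegate(next);
        if (previous != null) {
            // Release any resources held by the outgoing behavior.
            previous.close();
        }
    }
}
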
index 13445b0b26bb54f2794446647ab494ffb794fd26..b910313b096015ad166c8be1178ac8f72ad12167 100644 (file)
@@ -18,6 +18,7 @@ import akka.testkit.JavaTestKit;
 import akka.testkit.TestActorRef;
 import com.google.common.base.Optional;
 import com.google.common.base.Predicate;
+import com.google.common.base.Supplier;
 import com.google.common.collect.ImmutableMap;
 import java.util.Collections;
 import java.util.List;
@@ -52,12 +53,10 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
         private final TestActorRef<MessageCollectorActor> collectorActor;
         private final Map<Class<?>, Boolean> dropMessages = new ConcurrentHashMap<>();
         private volatile byte[] snapshot;
-        private volatile long mockTotalMemory;
 
         private TestRaftActor(String id, Map<String, String> peerAddresses, ConfigParams config,
                 TestActorRef<MessageCollectorActor> collectorActor) {
             super(id, peerAddresses, Optional.of(config), null);
-            dataPersistenceProvider = new PersistentDataProvider();
             this.collectorActor = collectorActor;
         }
 
@@ -75,13 +74,18 @@ public abstract class AbstractRaftActorIntegrationTest extends AbstractActorTest
             dropMessages.remove(msgClass);
         }
 
-        void setMockTotalMemory(long mockTotalMemory) {
-            this.mockTotalMemory = mockTotalMemory;
-        }
+        void setMockTotalMemory(final long mockTotalMemory) {
+            if(mockTotalMemory > 0) {
+                getRaftActorContext().setTotalMemoryRetriever(new Supplier<Long>() {
+                    @Override
+                    public Long get() {
+                        return mockTotalMemory;
+                    }
 
-        @Override
-        protected long getTotalMemory() {
-            return mockTotalMemory > 0 ? mockTotalMemory : super.getTotalMemory();
+                });
+            } else {
+                getRaftActorContext().setTotalMemoryRetriever(null);
+            }
         }
 
         @Override
index 885c3ab1094eb68d677fad1728fcbf6046e8f127..8fdb7ea226e835186df84d7b71cb4125f2b3a759 100644 (file)
@@ -11,6 +11,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import akka.japi.Procedure;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -220,5 +221,9 @@ public class AbstractReplicatedLogImplTest {
         public List<ReplicatedLogEntry> getEntriesTill(final int index) {
             return journal.subList(0, index);
         }
+
+        @Override
+        public void appendAndPersist(ReplicatedLogEntry replicatedLogEntry, Procedure<ReplicatedLogEntry> callback) {
+        }
     }
 }
index 53cca237413ea7383c912cda09a8b9e6d6e6f098..63f0df2f8c74c42e6a4d45bcf95a0f4aa707a48a 100644 (file)
@@ -12,7 +12,9 @@ import akka.actor.ActorRef;
 import akka.actor.ActorSelection;
 import akka.actor.ActorSystem;
 import akka.actor.Props;
+import akka.japi.Procedure;
 import com.google.common.base.Preconditions;
+import com.google.common.base.Supplier;
 import com.google.protobuf.GeneratedMessage;
 import java.io.Serializable;
 import java.util.HashMap;
@@ -203,6 +205,20 @@ public class MockRaftActorContext implements RaftActorContext {
         this.configParams = configParams;
     }
 
+    @Override
+    public long getTotalMemory() {
+        return Runtime.getRuntime().totalMemory();
+    }
+
+    @Override
+    public void setTotalMemoryRetriever(Supplier<Long> retriever) {
+    }
+
+    @Override
+    public boolean hasFollowers() {
+        return getPeerAddresses().keySet().size() > 0;
+    }
+
     public static class SimpleReplicatedLog extends AbstractReplicatedLogImpl {
         @Override public void appendAndPersist(
             ReplicatedLogEntry replicatedLogEntry) {
@@ -217,6 +233,19 @@ public class MockRaftActorContext implements RaftActorContext {
         @Override public void removeFromAndPersist(long index) {
             removeFrom(index);
         }
+
+        @Override
+        public void appendAndPersist(ReplicatedLogEntry replicatedLogEntry, Procedure<ReplicatedLogEntry> callback) {
+            append(replicatedLogEntry);
+
+            if(callback != null) {
+                try {
+                    callback.apply(replicatedLogEntry);
+                } catch (Exception e) {
+                    e.printStackTrace();
+                }
+            }
+        }
     }
 
     public static class MockPayload extends Payload implements Serializable {
index 0a4a2c7717facfcc9fc883c2234d4577ff49f876..17a81ac3c39aa9c0a27743e1739892629a157ba8 100644 (file)
@@ -54,9 +54,11 @@ import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.NonPersistentDataProvider;
 import org.opendaylight.controller.cluster.datastore.DataPersistenceProviderMonitor;
 import org.opendaylight.controller.cluster.notifications.LeaderStateChanged;
 import org.opendaylight.controller.cluster.notifications.RoleChanged;
+import org.opendaylight.controller.cluster.raft.RaftActor.UpdateElectionTerm;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplyJournalEntries;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplyLogEntries;
 import org.opendaylight.controller.cluster.raft.base.messages.ApplySnapshot;
@@ -97,7 +99,6 @@ public class RaftActorTest extends AbstractActorTest {
 
     public static class MockRaftActor extends RaftActor {
 
-        protected DataPersistenceProvider dataPersistenceProvider;
         private final RaftActor delegate;
         private final CountDownLatch recoveryComplete = new CountDownLatch(1);
         private final List<Object> state;
@@ -137,9 +138,9 @@ public class RaftActorTest extends AbstractActorTest {
             state = new ArrayList<>();
             this.delegate = mock(RaftActor.class);
             if(dataPersistenceProvider == null){
-                this.dataPersistenceProvider = new PersistentDataProvider();
+                setPersistence(true);
             } else {
-                this.dataPersistenceProvider = dataPersistenceProvider;
+                setPersistence(dataPersistenceProvider);
             }
         }
 
@@ -252,11 +253,6 @@ public class RaftActorTest extends AbstractActorTest {
             delegate.onStateChanged();
         }
 
-        @Override
-        protected DataPersistenceProvider persistence() {
-            return this.dataPersistenceProvider;
-        }
-
         @Override
         protected Optional<ActorRef> getRoleChangeNotifier() {
             return Optional.fromNullable(roleChangeNotifier);
@@ -542,7 +538,7 @@ public class RaftActorTest extends AbstractActorTest {
 
                 assertEquals("remove log entries", 1, replicatedLog.size());
 
-                mockRaftActor.onReceiveRecover(new RaftActor.UpdateElectionTerm(10, "foobar"));
+                mockRaftActor.onReceiveRecover(new UpdateElectionTerm(10, "foobar"));
 
                 assertEquals("election term", 10, mockRaftActor.getRaftActorContext().getTermInformation().getCurrentTerm());
                 assertEquals("voted for", "foobar", mockRaftActor.getRaftActorContext().getTermInformation().getVotedFor());
@@ -607,7 +603,7 @@ public class RaftActorTest extends AbstractActorTest {
 
                 assertEquals("remove log entries", 0, replicatedLog.size());
 
-                mockRaftActor.onReceiveRecover(new RaftActor.UpdateElectionTerm(10, "foobar"));
+                mockRaftActor.onReceiveRecover(new UpdateElectionTerm(10, "foobar"));
 
                 assertNotEquals("election term", 10, mockRaftActor.getRaftActorContext().getTermInformation().getCurrentTerm());
                 assertNotEquals("voted for", "foobar", mockRaftActor.getRaftActorContext().getTermInformation().getVotedFor());
@@ -986,7 +982,7 @@ public class RaftActorTest extends AbstractActorTest {
 
             TestActorRef<MockRaftActor> raftActorRef = factory.createTestActor(MockRaftActor.props(persistenceId,
                     Collections.<String, String>emptyMap(), Optional.<ConfigParams>of(config), notifierActor,
-                    new NonPersistentProvider()), persistenceId);
+                    new NonPersistentDataProvider()), persistenceId);
 
             List<RoleChanged> matches =  MessageCollectorActor.expectMatching(notifierActor, RoleChanged.class, 3);
 
@@ -1161,13 +1157,13 @@ public class RaftActorTest extends AbstractActorTest {
                         new MockRaftActorContext.MockPayload("foo-3"),
                         new MockRaftActorContext.MockPayload("foo-4")));
 
-                leaderActor.getRaftActorContext().getSnapshotManager().persist(new NonPersistentProvider()
+                leaderActor.getRaftActorContext().getSnapshotManager().persist(new NonPersistentDataProvider()
                         , snapshotBytes.toByteArray(), leader, Runtime.getRuntime().totalMemory());
 
                 assertFalse(leaderActor.getRaftActorContext().getSnapshotManager().isCapturing());
 
                 // The commit is needed to complete the snapshot creation process
-                leaderActor.getRaftActorContext().getSnapshotManager().commit(new NonPersistentProvider(), -1);
+                leaderActor.getRaftActorContext().getSnapshotManager().commit(new NonPersistentDataProvider(), -1);
 
                 // capture snapshot reply should remove the snapshotted entries only
                 assertEquals(3, leaderActor.getReplicatedLog().size());
@@ -1271,7 +1267,7 @@ public class RaftActorTest extends AbstractActorTest {
                 assertFalse(followerActor.getRaftActorContext().getSnapshotManager().isCapturing());
 
                 // The commit is needed to complete the snapshot creation process
-                followerActor.getRaftActorContext().getSnapshotManager().commit(new NonPersistentProvider(), -1);
+                followerActor.getRaftActorContext().getSnapshotManager().commit(new NonPersistentDataProvider(), -1);
 
                 // capture snapshot reply should remove the snapshotted entries only till replicatedToAllIndex
                 assertEquals(3, followerActor.getReplicatedLog().size()); //indexes 5,6,7 left in the log
@@ -1380,38 +1376,6 @@ public class RaftActorTest extends AbstractActorTest {
         };
     }
 
-
-    private static class NonPersistentProvider implements DataPersistenceProvider {
-        @Override
-        public boolean isRecoveryApplicable() {
-            return false;
-        }
-
-        @Override
-        public <T> void persist(T o, Procedure<T> procedure) {
-            try {
-                procedure.apply(o);
-            } catch (Exception e) {
-                e.printStackTrace();
-            }
-        }
-
-        @Override
-        public void saveSnapshot(Object o) {
-
-        }
-
-        @Override
-        public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
-
-        }
-
-        @Override
-        public void deleteMessages(long sequenceNumber) {
-
-        }
-    }
-
     @Test
     public void testRealSnapshotWhenReplicatedToAllIndexMinusOne() throws Exception {
         new JavaTestKit(getSystem()) {{
@@ -1421,7 +1385,7 @@ public class RaftActorTest extends AbstractActorTest {
             config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
             config.setSnapshotBatchCount(5);
 
-            DataPersistenceProvider dataPersistenceProvider = new NonPersistentProvider();
+            DataPersistenceProvider dataPersistenceProvider = new NonPersistentDataProvider();
 
             Map<String, String> peerAddresses = new HashMap<>();
 
@@ -1468,7 +1432,7 @@ public class RaftActorTest extends AbstractActorTest {
             config.setIsolatedLeaderCheckInterval(new FiniteDuration(1, TimeUnit.DAYS));
             config.setSnapshotBatchCount(5);
 
-            DataPersistenceProvider dataPersistenceProvider = new NonPersistentProvider();
+            DataPersistenceProvider dataPersistenceProvider = new NonPersistentDataProvider();
 
             Map<String, String> peerAddresses = new HashMap<>();
 
index 3d75edb5bdb875b54265a672a38f7e69baf41fa5..5a0d5aed741d0e9d1a943305fc24a5f94dae573a 100644 (file)
@@ -1,8 +1,9 @@
 package org.opendaylight.controller.cluster.raft;
 
-import static junit.framework.TestCase.assertFalse;
-import static junit.framework.TestCase.assertTrue;
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyLong;
 import static org.mockito.Mockito.doReturn;
@@ -15,6 +16,7 @@ import akka.actor.ActorRef;
 import akka.japi.Procedure;
 import akka.persistence.SnapshotSelectionCriteria;
 import akka.testkit.TestActorRef;
+import com.google.common.collect.ImmutableMap;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
@@ -25,6 +27,7 @@ import org.mockito.ArgumentCaptor;
 import org.mockito.Mock;
 import org.mockito.MockitoAnnotations;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.raft.SnapshotManager.LastAppliedTermInformationReader;
 import org.opendaylight.controller.cluster.raft.base.messages.CaptureSnapshot;
 import org.opendaylight.controller.cluster.raft.base.messages.SendInstallSnapshot;
 import org.opendaylight.controller.cluster.raft.behaviors.RaftActorBehavior;
@@ -68,6 +71,10 @@ public class SnapshotManagerTest extends AbstractActorTest {
         doReturn("123").when(mockRaftActorContext).getId();
         doReturn("123").when(mockRaftActorBehavior).getLeaderId();
 
+        ElectionTerm mockElectionTerm = mock(ElectionTerm.class);
+        doReturn(mockElectionTerm).when(mockRaftActorContext).getTermInformation();
+        doReturn(5L).when(mockElectionTerm).getCurrentTerm();
+
         snapshotManager = new SnapshotManager(mockRaftActorContext, LoggerFactory.getLogger(this.getClass()));
         factory = new TestActorFactory(getSystem());
 
@@ -161,13 +168,24 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
     @Test
     public void testPersistWhenReplicatedToAllIndexMinusOne(){
-        doReturn("123").when(mockRaftActorContext).getId();
-        doReturn(45L).when(mockReplicatedLog).getSnapshotIndex();
-        doReturn(6L).when(mockReplicatedLog).getSnapshotTerm();
+        doReturn(7L).when(mockReplicatedLog).getSnapshotIndex();
+        doReturn(1L).when(mockReplicatedLog).getSnapshotTerm();
+
+        doReturn(ImmutableMap.builder().put("follower-1", "").build()).when(mockRaftActorContext).getPeerAddresses();
+
+        doReturn(8L).when(mockRaftActorContext).getLastApplied();
+
+        MockRaftActorContext.MockReplicatedLogEntry lastLogEntry = new MockRaftActorContext.MockReplicatedLogEntry(
+                3L, 9L, new MockRaftActorContext.MockPayload());
+
+        MockRaftActorContext.MockReplicatedLogEntry lastAppliedEntry = new MockRaftActorContext.MockReplicatedLogEntry(
+                2L, 8L, new MockRaftActorContext.MockPayload());
+
+        doReturn(lastAppliedEntry).when(mockReplicatedLog).get(8L);
+        doReturn(Arrays.asList(lastLogEntry)).when(mockReplicatedLog).getFrom(9L);
 
         // when replicatedToAllIndex = -1
-        snapshotManager.capture(new MockRaftActorContext.MockReplicatedLogEntry(6,9,
-                new MockRaftActorContext.MockPayload()), -1);
+        snapshotManager.capture(lastLogEntry, -1);
 
         snapshotManager.create(mockProcedure);
 
@@ -180,15 +198,14 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
         Snapshot snapshot = snapshotArgumentCaptor.getValue();
 
-        assertEquals(6, snapshot.getLastAppliedTerm());
-        assertEquals(9, snapshot.getLastAppliedIndex());
-        assertEquals(9, snapshot.getLastIndex());
-        assertEquals(6, snapshot.getLastTerm());
-        assertEquals(10, snapshot.getState().length);
-        assertTrue(Arrays.equals(bytes, snapshot.getState()));
-        assertEquals(0, snapshot.getUnAppliedEntries().size());
+        assertEquals("getLastTerm", 3L, snapshot.getLastTerm());
+        assertEquals("getLastIndex", 9L, snapshot.getLastIndex());
+        assertEquals("getLastAppliedTerm", 2L, snapshot.getLastAppliedTerm());
+        assertEquals("getLastAppliedIndex", 8L, snapshot.getLastAppliedIndex());
+        assertArrayEquals("getState", bytes, snapshot.getState());
+        assertEquals("getUnAppliedEntries", Arrays.asList(lastLogEntry), snapshot.getUnAppliedEntries());
 
-        verify(mockReplicatedLog).snapshotPreCommit(45L, 6L);
+        verify(mockReplicatedLog).snapshotPreCommit(7L, 1L);
     }
 
 
@@ -201,6 +218,8 @@ public class SnapshotManagerTest extends AbstractActorTest {
         snapshotManager.create(mockProcedure);
 
         verify(mockProcedure).apply(null);
+
+        assertEquals("isCapturing", true, snapshotManager.isCapturing());
     }
 
     @Test
@@ -258,10 +277,21 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
         snapshotManager.create(mockProcedure);
 
-        snapshotManager.persist(mockDataPersistenceProvider, new byte[]{}, mockRaftActorBehavior
+        byte[] bytes = new byte[] {1,2,3,4,5,6,7,8,9,10};
+        snapshotManager.persist(mockDataPersistenceProvider, bytes, mockRaftActorBehavior
                 , Runtime.getRuntime().totalMemory());
 
-        verify(mockDataPersistenceProvider).saveSnapshot(any(Snapshot.class));
+        ArgumentCaptor<Snapshot> snapshotArgumentCaptor = ArgumentCaptor.forClass(Snapshot.class);
+        verify(mockDataPersistenceProvider).saveSnapshot(snapshotArgumentCaptor.capture());
+
+        Snapshot snapshot = snapshotArgumentCaptor.getValue();
+
+        assertEquals("getLastTerm", 6L, snapshot.getLastTerm());
+        assertEquals("getLastIndex", 9L, snapshot.getLastIndex());
+        assertEquals("getLastAppliedTerm", 6L, snapshot.getLastAppliedTerm());
+        assertEquals("getLastAppliedIndex", 9L, snapshot.getLastAppliedIndex());
+        assertArrayEquals("getState", bytes, snapshot.getState());
+        assertEquals("getUnAppliedEntries size", 0, snapshot.getUnAppliedEntries().size());
 
         verify(mockReplicatedLog).snapshotPreCommit(9L, 6L);
 
@@ -482,20 +512,73 @@ public class SnapshotManagerTest extends AbstractActorTest {
     }
 
     @Test
-    public void testTrimLog(){
-        ElectionTerm mockElectionTerm = mock(ElectionTerm.class);
-        ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+    public void testTrimLogWhenTrimIndexLessThanLastApplied() {
         doReturn(20L).when(mockRaftActorContext).getLastApplied();
+
+        ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
         doReturn(true).when(mockReplicatedLog).isPresent(10);
-        doReturn(mockElectionTerm).when(mockRaftActorContext).getTermInformation();
-        doReturn(5L).when(mockElectionTerm).getCurrentTerm();
         doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
         doReturn(5L).when(replicatedLogEntry).getTerm();
 
-        snapshotManager.trimLog(10, mockRaftActorBehavior);
+        long retIndex = snapshotManager.trimLog(10, mockRaftActorBehavior);
+        assertEquals("return index", 10L, retIndex);
 
         verify(mockReplicatedLog).snapshotPreCommit(10, 5);
         verify(mockReplicatedLog).snapshotCommit();
+
+        verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
+    }
+
+    @Test
+    public void testTrimLogWhenLastAppliedNotSet() {
+        doReturn(-1L).when(mockRaftActorContext).getLastApplied();
+
+        ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+        doReturn(true).when(mockReplicatedLog).isPresent(10);
+        doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
+        doReturn(5L).when(replicatedLogEntry).getTerm();
+
+        long retIndex = snapshotManager.trimLog(10, mockRaftActorBehavior);
+        assertEquals("return index", -1L, retIndex);
+
+        verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
+        verify(mockReplicatedLog, never()).snapshotCommit();
+
+        verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
+    }
+
+    @Test
+    public void testTrimLogWhenLastAppliedZero() {
+        doReturn(0L).when(mockRaftActorContext).getLastApplied();
+
+        ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
+        doReturn(true).when(mockReplicatedLog).isPresent(10);
+        doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
+        doReturn(5L).when(replicatedLogEntry).getTerm();
+
+        long retIndex = snapshotManager.trimLog(10, mockRaftActorBehavior);
+        assertEquals("return index", -1L, retIndex);
+
+        verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
+        verify(mockReplicatedLog, never()).snapshotCommit();
+
+        verify(mockRaftActorBehavior, never()).setReplicatedToAllIndex(anyLong());
+    }
+
+    @Test
+    public void testTrimLogWhenTrimIndexNotPresent() {
+        doReturn(20L).when(mockRaftActorContext).getLastApplied();
+
+        doReturn(false).when(mockReplicatedLog).isPresent(10);
+
+        long retIndex = snapshotManager.trimLog(10, mockRaftActorBehavior);
+        assertEquals("return index", -1L, retIndex);
+
+        verify(mockReplicatedLog, never()).snapshotPreCommit(anyLong(), anyLong());
+        verify(mockReplicatedLog, never()).snapshotCommit();
+
+        // Trim index is greater than replicatedToAllIndex so should update it.
+        verify(mockRaftActorBehavior).setReplicatedToAllIndex(10L);
     }
 
     @Test
@@ -507,12 +590,9 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
         assertEquals(true, snapshotManager.isCapturing());
 
-        ElectionTerm mockElectionTerm = mock(ElectionTerm.class);
         ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
         doReturn(20L).when(mockRaftActorContext).getLastApplied();
         doReturn(true).when(mockReplicatedLog).isPresent(10);
-        doReturn(mockElectionTerm).when(mockRaftActorContext).getTermInformation();
-        doReturn(5L).when(mockElectionTerm).getCurrentTerm();
         doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
         doReturn(5L).when(replicatedLogEntry).getTerm();
 
@@ -532,12 +612,9 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
         assertEquals(true, snapshotManager.isCapturing());
 
-        ElectionTerm mockElectionTerm = mock(ElectionTerm.class);
         ReplicatedLogEntry replicatedLogEntry = mock(ReplicatedLogEntry.class);
         doReturn(20L).when(mockRaftActorContext).getLastApplied();
         doReturn(true).when(mockReplicatedLog).isPresent(10);
-        doReturn(mockElectionTerm).when(mockRaftActorContext).getTermInformation();
-        doReturn(5L).when(mockElectionTerm).getCurrentTerm();
         doReturn(replicatedLogEntry).when((mockReplicatedLog)).get(10);
         doReturn(5L).when(replicatedLogEntry).getTerm();
 
@@ -548,4 +625,48 @@ public class SnapshotManagerTest extends AbstractActorTest {
 
     }
 
+    @Test
+    public void testLastAppliedTermInformationReader() {
+
+        LastAppliedTermInformationReader reader = new LastAppliedTermInformationReader();
+
+        doReturn(4L).when(mockReplicatedLog).getSnapshotTerm();
+        doReturn(7L).when(mockReplicatedLog).getSnapshotIndex();
+
+        ReplicatedLogEntry lastLogEntry = new MockRaftActorContext.MockReplicatedLogEntry(6L, 9L,
+                new MockRaftActorContext.MockPayload());
+
+        // No followers and valid lastLogEntry
+        reader.init(mockReplicatedLog, 1L, lastLogEntry, false);
+
+        assertEquals("getTerm", 6L, reader.getTerm());
+        assertEquals("getIndex", 9L, reader.getIndex());
+
+        // No followers and null lastLogEntry
+        reader.init(mockReplicatedLog, 1L, null, false);
+
+        assertEquals("getTerm", -1L, reader.getTerm());
+        assertEquals("getIndex", -1L, reader.getIndex());
+
+        // Followers and valid originalIndex entry
+        doReturn(new MockRaftActorContext.MockReplicatedLogEntry(5L, 8L,
+                new MockRaftActorContext.MockPayload())).when(mockReplicatedLog).get(8L);
+        reader.init(mockReplicatedLog, 8L, lastLogEntry, true);
+
+        assertEquals("getTerm", 5L, reader.getTerm());
+        assertEquals("getIndex", 8L, reader.getIndex());
+
+        // Followers and null originalIndex entry and valid snapshot index
+        reader.init(mockReplicatedLog, 7L, lastLogEntry, true);
+
+        assertEquals("getTerm", 4L, reader.getTerm());
+        assertEquals("getIndex", 7L, reader.getIndex());
+
+        // Followers and null originalIndex entry and invalid snapshot index
+        doReturn(-1L).when(mockReplicatedLog).getSnapshotIndex();
+        reader.init(mockReplicatedLog, 7L, lastLogEntry, true);
+
+        assertEquals("getTerm", -1L, reader.getTerm());
+        assertEquals("getIndex", -1L, reader.getIndex());
+    }
 }
\ No newline at end of file
index 2eee0e8099923993e7ca57dfd89747a103c4cd0e..678ac34e39d25d54fc12316fc17df1d51c5ed28f 100644 (file)
@@ -10,8 +10,11 @@ package org.opendaylight.controller.md.sal.binding.api;
 import java.util.Collection;
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
-import org.opendaylight.yangtools.concepts.Identifiable;
+import org.opendaylight.yangtools.yang.binding.Augmentation;
+import org.opendaylight.yangtools.yang.binding.ChildOf;
 import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.Identifiable;
+import org.opendaylight.yangtools.yang.binding.Identifier;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
 
 /**
@@ -20,7 +23,7 @@ import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
  * Represents modification of Data Object.
  *
  */
-public interface DataObjectModification<T extends DataObject> extends Identifiable<PathArgument> {
+public interface DataObjectModification<T extends DataObject> extends org.opendaylight.yangtools.concepts.Identifiable<PathArgument> {
 
     enum ModificationType {
         /**
@@ -76,5 +79,56 @@ public interface DataObjectModification<T extends DataObject> extends Identifiab
      */
     @Nonnull Collection<DataObjectModification<? extends DataObject>> getModifiedChildren();
 
+    /**
+     * Returns the container child modification if {@code child} was modified by this
+     * modification.
+     *
+     * To access all modified children, consider iterating over {@link #getModifiedChildren()}.
+     *
+     * @param child Type of child - must be a container
+     * @return Modification of {@code child} if {@code child} was modified, null otherwise.
+     * @throws IllegalArgumentException If the supplied {@code child} class is not a valid child according
+     *         to the generated model.
+     */
+    @Nullable <C extends ChildOf<? super T>> DataObjectModification<C> getModifiedChildContainer(@Nonnull Class<C> child);
+
+    /**
+     * Returns the augmentation child modification if {@code augmentation} was modified by this
+     * modification.
+     *
+     * To access all modified children, consider iterating over {@link #getModifiedChildren()}.
+     *
+     * @param augmentation Type of augmentation
+     * @return Modification of {@code augmentation} if {@code augmentation} was modified, null otherwise.
+     * @throws IllegalArgumentException If the supplied {@code augmentation} class is not a valid augmentation
+     *         according to the generated model.
+     */
+    @Nullable <C extends Augmentation<T> & DataObject> DataObjectModification<C> getModifiedAugmentation(@Nonnull Class<C> augmentation);
+
+
+    /**
+     * Returns the child list item modification if {@code child} was modified by this modification.
+     *
+     * @param listItem Type of list item - must be a keyed list item
+     * @param listKey List item key
+     * @return Modification of {@code child} if {@code child} was modified, null otherwise.
+     * @throws IllegalArgumentException If the supplied {@code listItem} class is not a valid child according
+     *         to the generated model.
+     */
+    <C extends Identifiable<K> & ChildOf<? super T>, K extends Identifier<C>> DataObjectModification<C> getModifiedChildListItem(
+            @Nonnull Class<C> listItem, @Nonnull K listKey);
+
+    /**
+     * Returns a child modification if a node identified by {@code childArgument} was modified by
+     * this modification.
+     *
+     * @param childArgument Path argument of the child node
+     * @return Modification of the child identified by {@code childArgument} if {@code childArgument}
+     *         was modified, null otherwise.
+     * @throws IllegalArgumentException If the supplied path argument is not a valid child according to
+     *         the generated model.
+     */
+    @Nullable DataObjectModification<? extends DataObject> getModifiedChild(PathArgument childArgument);
 
 }
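
The typed accessors above let a listener drill into a change without scanning the whole subtree. A hedged sketch of their use; Topology, TopologyStats, Node and NodeKey stand in for hypothetical generated binding classes and are not part of this patch:

// Hypothetical navigation helper built only on the accessors declared above.
final class ModificationNavigationSketch {
    static void inspect(final DataObjectModification<Topology> root) {
        // Direct container child, if it was touched by this change.
        final DataObjectModification<TopologyStats> stats =
                root.getModifiedChildContainer(TopologyStats.class);

        // A single keyed list entry, if it was touched by this change.
        final DataObjectModification<Node> node =
                root.getModifiedChildListItem(Node.class, new NodeKey("node-1"));

        if (stats == null && node == null) {
            // Nothing of interest changed directly; fall back to iterating
            // over root.getModifiedChildren().
        }
    }
}
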
index 6b1df719ac672f12c24bbe167b9094772ed9c4a9..93ab968451be9882e91379c867ae11a13dc111ad 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.md.sal.binding.api;
 import java.util.Collection;
 import java.util.EventListener;
 import javax.annotation.Nonnull;
+import org.opendaylight.yangtools.yang.binding.DataObject;
 
 /**
  * Interface implemented by classes interested in receiving notifications about
@@ -17,7 +18,7 @@ import javax.annotation.Nonnull;
  * in that it provides a cursor-based view of the change, which has potentially
  * lower overhead and allow more flexible consumption of change event.
  */
-public interface DataTreeChangeListener extends EventListener {
+public interface DataTreeChangeListener<T extends DataObject> extends EventListener {
     /**
      * Invoked when there was data change for the supplied path, which was used
      * to register this listener.
@@ -39,5 +40,5 @@ public interface DataTreeChangeListener extends EventListener {
      *
      * @param changes Collection of change events, may not be null or empty.
      */
-    void onDataTreeChanged(@Nonnull Collection<DataTreeModification> changes);
+    void onDataTreeChanged(@Nonnull Collection<DataTreeModification<T>> changes);
 }
index ae4e36f14acb31653396d014facd18617f637e35..9d12e449169d74e89663f727ad37476582f2065d 100644 (file)
@@ -9,6 +9,7 @@ package org.opendaylight.controller.md.sal.binding.api;
 
 import javax.annotation.Nonnull;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
 
 /**
  * A {@link DOMService} which allows users to register for changes to a
@@ -50,5 +51,5 @@ public interface DataTreeChangeService extends BindingService {
      *         your listener using {@link ListenerRegistration#close()} to stop
      *         delivery of change events.
      */
-    @Nonnull <L extends DataTreeChangeListener> ListenerRegistration<L> registerDataTreeChangeListener(@Nonnull DataTreeIdentifier treeId, @Nonnull L listener);
+    @Nonnull <T extends DataObject,L extends DataTreeChangeListener<T>> ListenerRegistration<L> registerDataTreeChangeListener(@Nonnull DataTreeIdentifier<T> treeId, @Nonnull L listener);
 }
\ No newline at end of file
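
Putting the typed listener and service together: a hedged sketch of registering a listener for one subtree and later closing the registration to stop delivery of change events. Topology is a stand-in for a hypothetical generated binding class, and the service may be any implementation of the interface above (for example the BindingDOMDataBrokerAdapter further down in this patch):

import java.util.Collection;
import javax.annotation.Nonnull;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

// Hypothetical listener and registration sketch.
final class TopologyChangeListener implements DataTreeChangeListener<Topology> {
    @Override
    public void onDataTreeChanged(@Nonnull final Collection<DataTreeModification<Topology>> changes) {
        for (DataTreeModification<Topology> change : changes) {
            final DataObjectModification<Topology> root = change.getRootNode();
            // Inspect 'root', e.g. via the accessors shown earlier.
        }
    }

    static ListenerRegistration<TopologyChangeListener> register(final DataTreeChangeService service) {
        final DataTreeIdentifier<Topology> treeId = new DataTreeIdentifier<>(
                LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Topology.class));
        // Closing the returned registration stops delivery of change events.
        return service.registerDataTreeChangeListener(treeId, new TopologyChangeListener());
    }
}
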
index 428957e988869d1b0641c42fa21bdc739b767993..c1c23d5e6fc7d3d17f24c9a7613375fb6b4881e3 100644 (file)
@@ -13,18 +13,19 @@ import javax.annotation.Nonnull;
 import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
 import org.opendaylight.yangtools.concepts.Immutable;
 import org.opendaylight.yangtools.concepts.Path;
+import org.opendaylight.yangtools.yang.binding.DataObject;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
 
 /**
  * A unique identifier for a particular subtree. It is composed of the logical
  * data store type and the instance identifier of the root node.
  */
-public final class DataTreeIdentifier implements Immutable, Path<DataTreeIdentifier>, Serializable {
+public final class DataTreeIdentifier<T extends DataObject> implements Immutable, Path<DataTreeIdentifier<?>>, Serializable {
     private static final long serialVersionUID = 1L;
-    private final InstanceIdentifier<?> rootIdentifier;
+    private final InstanceIdentifier<T> rootIdentifier;
     private final LogicalDatastoreType datastoreType;
 
-    public DataTreeIdentifier(final LogicalDatastoreType datastoreType, final InstanceIdentifier<?> rootIdentifier) {
+    public DataTreeIdentifier(final LogicalDatastoreType datastoreType, final InstanceIdentifier<T> rootIdentifier) {
         this.datastoreType = Preconditions.checkNotNull(datastoreType);
         this.rootIdentifier = Preconditions.checkNotNull(rootIdentifier);
     }
@@ -48,7 +49,7 @@ public final class DataTreeIdentifier implements Immutable, Path<DataTreeIdentif
     }
 
     @Override
-    public boolean contains(final DataTreeIdentifier other) {
+    public boolean contains(final DataTreeIdentifier<?> other) {
         return datastoreType == other.datastoreType && rootIdentifier.contains(other.rootIdentifier);
     }
 
@@ -69,7 +70,7 @@ public final class DataTreeIdentifier implements Immutable, Path<DataTreeIdentif
         if (!(obj instanceof DataTreeIdentifier)) {
             return false;
         }
-        DataTreeIdentifier other = (DataTreeIdentifier) obj;
+        final DataTreeIdentifier<?> other = (DataTreeIdentifier<?>) obj;
         if (datastoreType != other.datastoreType) {
             return false;
         }
index aac51a6a4cfa349d4c5bf602c71c12a169dd8395..8163baceac95b449fdc57c50ad81c2d7b2322ddb 100644 (file)
@@ -17,7 +17,7 @@ import org.opendaylight.yangtools.yang.binding.DataObject;
  * @author Tony Tkacik &lt;ttkacik@cisco.com&gt;
  *
  */
-public interface DataTreeModification {
+public interface DataTreeModification<T extends DataObject> {
 
     /**
      * Get the modification root path. This is the path of the root node
@@ -25,13 +25,13 @@ public interface DataTreeModification {
      *
      * @return absolute path of the root node
      */
-    @Nonnull DataTreeIdentifier getRootPath();
+    @Nonnull DataTreeIdentifier<T> getRootPath();
 
     /**
      * Get the modification root node.
      *
      * @return modification root node
      */
-    @Nonnull DataObjectModification<? extends DataObject> getRootNode();
+    @Nonnull DataObjectModification<T> getRootNode();
 
 }
index b17be166156574b9a7381fae6155682cd40044a9..1c43f1287648d4ef48d3fef5f0a15e41e54c1e7d 100644 (file)
@@ -13,14 +13,19 @@ import com.google.common.collect.ImmutableSet;
 import java.util.Set;
 import org.opendaylight.controller.md.sal.binding.api.BindingTransactionChain;
 import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeService;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
 import org.opendaylight.controller.md.sal.binding.api.ReadOnlyTransaction;
 import org.opendaylight.controller.md.sal.binding.api.ReadWriteTransaction;
 import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
 import org.opendaylight.controller.md.sal.binding.impl.BindingDOMAdapterBuilder.Factory;
 import org.opendaylight.controller.md.sal.common.api.data.TransactionChainListener;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
 import org.opendaylight.controller.md.sal.dom.api.DOMService;
-import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
 
 /**
  * The DataBrokerImpl simply defers to the DOMDataBroker for all its operations.
@@ -32,7 +37,7 @@ import org.opendaylight.controller.sal.core.api.model.SchemaService;
  *
 
  */
-public class BindingDOMDataBrokerAdapter extends AbstractForwardedDataBroker implements DataBroker {
+public class BindingDOMDataBrokerAdapter extends AbstractForwardedDataBroker implements DataBroker, DataTreeChangeService {
 
 
     static final Factory<DataBroker> BUILDER_FACTORY = new BindingDOMAdapterBuilder.Factory<DataBroker>() {
@@ -43,14 +48,16 @@ public class BindingDOMDataBrokerAdapter extends AbstractForwardedDataBroker imp
         }
 
     };
+    private final DataTreeChangeService treeChangeService;
 
     public BindingDOMDataBrokerAdapter(final DOMDataBroker domDataBroker, final BindingToNormalizedNodeCodec codec) {
         super(domDataBroker, codec);
-    }
-
-    @Deprecated
-    public BindingDOMDataBrokerAdapter(final DOMDataBroker domDataBroker, final BindingToNormalizedNodeCodec codec, final SchemaService schemaService) {
-        super(domDataBroker, codec,schemaService);
+        final DOMDataTreeChangeService domTreeChange = (DOMDataTreeChangeService) domDataBroker.getSupportedExtensions().get(DOMDataTreeChangeService.class);
+        if(domTreeChange != null) {
+            treeChangeService = BindingDOMDataTreeChangeServiceAdapter.create(codec, domTreeChange);
+        } else {
+            treeChangeService = null;
+        }
     }
 
     @Override
@@ -82,13 +89,21 @@ public class BindingDOMDataBrokerAdapter extends AbstractForwardedDataBroker imp
         }
 
         @Override
-        protected DataBroker createInstance(BindingToNormalizedNodeCodec codec,
-                ClassToInstanceMap<DOMService> delegates) {
-            DOMDataBroker domDataBroker = delegates.getInstance(DOMDataBroker.class);
+        protected DataBroker createInstance(final BindingToNormalizedNodeCodec codec,
+                final ClassToInstanceMap<DOMService> delegates) {
+            final DOMDataBroker domDataBroker = delegates.getInstance(DOMDataBroker.class);
             return new BindingDOMDataBrokerAdapter(domDataBroker, codec);
         }
 
+    }
 
-
+    @Override
+    public <T extends DataObject, L extends DataTreeChangeListener<T>> ListenerRegistration<L> registerDataTreeChangeListener(
+            final DataTreeIdentifier<T> treeId, final L listener) {
+        if(treeChangeService == null) {
+            throw new UnsupportedOperationException("Underlying data broker does not expose DOMDataTreeChangeService.");
+        }
+        return treeChangeService.registerDataTreeChangeListener(treeId, listener);
     }
+
 }
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeListenerAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeListenerAdapter.java
new file mode 100644 (file)
index 0000000..ab1348f
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Adapter wrapping a Binding {@link DataTreeChangeListener}, exposing
+ * it as a {@link DOMDataTreeChangeListener} and translating DOM events
+ * to their Binding equivalents.
+ *
+ */
+final class BindingDOMDataTreeChangeListenerAdapter<T extends DataObject> implements DOMDataTreeChangeListener {
+
+    private final BindingToNormalizedNodeCodec codec;
+    private final DataTreeChangeListener<T> listener;
+    private final LogicalDatastoreType store;
+
+    BindingDOMDataTreeChangeListenerAdapter(final BindingToNormalizedNodeCodec codec, final DataTreeChangeListener<T> listener,
+            final LogicalDatastoreType store) {
+        this.codec = Preconditions.checkNotNull(codec);
+        this.listener = Preconditions.checkNotNull(listener);
+        this.store = Preconditions.checkNotNull(store);
+    }
+
+    @Override
+    public void onDataTreeChanged(final Collection<DataTreeCandidate> domChanges) {
+        final Collection<DataTreeModification<T>> bindingChanges = LazyDataTreeModification.from(codec, domChanges, store);
+        listener.onDataTreeChanged(bindingChanges);
+    }
+}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeServiceAdapter.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDOMDataTreeChangeServiceAdapter.java
new file mode 100644 (file)
index 0000000..ad0ab54
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeService;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeService;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeIdentifier;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+
+/**
+ * Adapter exposing the Binding {@link DataTreeChangeService} on top of a
+ * {@link DOMDataTreeChangeService}, responsible for translating registrations
+ * and instantiating {@link BindingDOMDataTreeChangeListenerAdapter} adapters.
+ *
+ * Each registered {@link DataTreeChangeListener} is wrapped in such an
+ * adapter and registered directly with the DOM service.
+ */
+final class BindingDOMDataTreeChangeServiceAdapter implements DataTreeChangeService {
+
+    private final BindingToNormalizedNodeCodec codec;
+    private final DOMDataTreeChangeService dataTreeChangeService;
+
+    private BindingDOMDataTreeChangeServiceAdapter(final BindingToNormalizedNodeCodec codec,
+            final DOMDataTreeChangeService dataTreeChangeService) {
+        this.codec = Preconditions.checkNotNull(codec);
+        this.dataTreeChangeService = Preconditions.checkNotNull(dataTreeChangeService);
+    }
+
+    static DataTreeChangeService create(final BindingToNormalizedNodeCodec codec,
+            final DOMDataTreeChangeService dataTreeChangeService) {
+        return new BindingDOMDataTreeChangeServiceAdapter(codec, dataTreeChangeService);
+    }
+
+    @Override
+    public <T extends DataObject, L extends DataTreeChangeListener<T>> ListenerRegistration<L> registerDataTreeChangeListener(
+            final DataTreeIdentifier<T> treeId, final L listener) {
+        final DOMDataTreeIdentifier domIdentifier = toDomTreeIdentifier(treeId);
+        final BindingDOMDataTreeChangeListenerAdapter<T> domListener =
+                new BindingDOMDataTreeChangeListenerAdapter<>(codec, listener, treeId.getDatastoreType());
+        final ListenerRegistration<BindingDOMDataTreeChangeListenerAdapter<T>> domReg =
+                dataTreeChangeService.registerDataTreeChangeListener(domIdentifier, domListener);
+        return new BindingDataTreeChangeListenerRegistration<>(listener, domReg);
+    }
+
+    private DOMDataTreeIdentifier toDomTreeIdentifier(final DataTreeIdentifier<?> treeId) {
+        final YangInstanceIdentifier domPath = codec.toYangInstanceIdentifier(treeId.getRootIdentifier());
+        return new DOMDataTreeIdentifier(treeId.getDatastoreType(), domPath);
+    }
+}
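
A hedged usage sketch of the registration path, mirroring the DataTreeChangeListenerTest added later in this change: the broker adapter delegates to the service adapter above, which translates the tree identifier and wraps the listener before registering it with the DOM service. TopChangeLogger is the hypothetical listener sketched earlier.

```java
import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
import org.opendaylight.controller.md.sal.binding.impl.BindingDOMDataBrokerAdapter;
import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

final class RegistrationSketch {
    static ListenerRegistration<TopChangeLogger> register(final BindingDOMDataBrokerAdapter broker) {
        final DataTreeIdentifier<Top> treeId = new DataTreeIdentifier<>(
                LogicalDatastoreType.OPERATIONAL, InstanceIdentifier.create(Top.class));
        // Internally this ends up in BindingDOMDataTreeChangeServiceAdapter.registerDataTreeChangeListener.
        return broker.registerDataTreeChangeListener(treeId, new TopChangeLogger());
    }
}
```

Closing the returned registration also closes the wrapped DOM registration, via the BindingDataTreeChangeListenerRegistration added below.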
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDataTreeChangeListenerRegistration.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingDataTreeChangeListenerRegistration.java
new file mode 100644 (file)
index 0000000..8a92e5f
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+class BindingDataTreeChangeListenerRegistration<L extends DataTreeChangeListener<?>> extends AbstractListenerRegistration<L> {
+
+    private final ListenerRegistration<?> domReg;
+
+    BindingDataTreeChangeListenerRegistration(final L listener, final ListenerRegistration<?> domReg) {
+        super(listener);
+        this.domReg = Preconditions.checkNotNull(domReg);
+    }
+
+    @Override
+    protected void removeRegistration() {
+        domReg.close();
+    }
+}
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingStructuralType.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/BindingStructuralType.java
new file mode 100644 (file)
index 0000000..7cd17dc
--- /dev/null
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Optional;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.AugmentationIdentifier;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeIdentifierWithPredicates;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.NodeWithValue;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.AnyXmlNode;
+import org.opendaylight.yangtools.yang.data.api.schema.AugmentationNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ChoiceNode;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.LeafSetNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapEntryNode;
+import org.opendaylight.yangtools.yang.data.api.schema.MapNode;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+
+/**
+ * Defines the structural mapping of a Normalized Node to Binding data
+ * addressable by an Instance Identifier.
+ *
+ * Not all Binding data is addressable by an instance identifier,
+ * and the mapping differs between node types.
+ *
+ * See {@link #NOT_ADDRESSABLE}, {@link #INVISIBLE_CONTAINER} and {@link #VISIBLE_CONTAINER}
+ * for details.
+ */
+enum BindingStructuralType {
+
+    /**
+     * The DOM item is not addressable by a Binding Instance Identifier;
+     * the data is not lost, but is available only via the parent object.
+     *
+     * Such data includes leaf-lists, leafs, lists without keys
+     * and anyxml nodes.
+     */
+    NOT_ADDRESSABLE,
+    /**
+     * The data container is addressable in NormalizedNode format,
+     * but is not represented in a Binding Instance Identifier.
+     *
+     * These are choice / case nodes.
+     *
+     * The data is still accessible via the parent object and its
+     * children are addressable.
+     */
+    INVISIBLE_CONTAINER,
+    /**
+     * The data container is addressable in NormalizedNode format,
+     * but is not represented in a Binding Instance Identifier.
+     *
+     * These are list nodes.
+     *
+     * The data is still accessible via the parent object and its
+     * children are addressable.
+     */
+    INVISIBLE_LIST,
+    /**
+     * The data container is addressable both in Binding Instance Identifier
+     * format and in YangInstanceIdentifier format.
+     */
+    VISIBLE_CONTAINER,
+    /**
+     * The mapping algorithm was unable to detect the type, or it was not updated
+     * after the introduction of a new NormalizedNode type.
+     */
+    UNKNOWN;
+
+    static BindingStructuralType from(final DataTreeCandidateNode domChildNode) {
+        final Optional<NormalizedNode<?, ?>> dataBased = domChildNode.getDataAfter().or(domChildNode.getDataBefore());
+        if(dataBased.isPresent()) {
+            return from(dataBased.get());
+        }
+        return from(domChildNode.getIdentifier());
+    }
+
+    private static BindingStructuralType from(final PathArgument identifier) {
+        if(identifier instanceof NodeIdentifierWithPredicates || identifier instanceof AugmentationIdentifier) {
+            return VISIBLE_CONTAINER;
+        }
+        if(identifier instanceof NodeWithValue) {
+            return NOT_ADDRESSABLE;
+        }
+        return UNKNOWN;
+    }
+
+    static BindingStructuralType from(final NormalizedNode<?, ?> data) {
+        if(isNotAddressable(data)) {
+            return NOT_ADDRESSABLE;
+        }
+        if(data instanceof MapNode) {
+            return INVISIBLE_LIST;
+        }
+        if(data instanceof ChoiceNode) {
+            return INVISIBLE_CONTAINER;
+        }
+        if(isVisibleContainer(data)) {
+            return VISIBLE_CONTAINER;
+        }
+        return UNKNOWN;
+    }
+
+    private static boolean isVisibleContainer(final NormalizedNode<?, ?> data) {
+        return data instanceof MapEntryNode || data instanceof ContainerNode || data instanceof AugmentationNode;
+    }
+
+    private static boolean isNotAddressable(final NormalizedNode<?, ?> d) {
+        return d instanceof LeafNode
+                || d instanceof AnyXmlNode
+                || d instanceof LeafSetNode
+                || d instanceof LeafSetEntryNode;
+    }
+
+}
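
For illustration, a consumer of this classification typically dispatches on the result for each DOM child, much as the LazyDataObjectModification class added below does. This is a hedged sketch assuming same-package access to the package-private enum; the recurse/handleAddressable helpers are placeholders, not part of this change.

```java
import java.util.Collection;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;

final class DispatchSketch {
    static void dispatch(final DataTreeCandidateNode domChild) {
        switch (BindingStructuralType.from(domChild)) {
            case NOT_ADDRESSABLE:
                // Leafs, leaf-lists, anyxml: reachable only through the parent Binding object.
                break;
            case INVISIBLE_CONTAINER:
            case INVISIBLE_LIST:
                // Choice/case and list nodes: recurse, their children are addressable.
                recurse(domChild.getChildNodes());
                break;
            case VISIBLE_CONTAINER:
            case UNKNOWN:
                // Addressable in both Binding and DOM form.
                handleAddressable(domChild);
                break;
            default:
                break;
        }
    }

    private static void recurse(final Collection<DataTreeCandidateNode> children) {
        for (final DataTreeCandidateNode child : children) {
            dispatch(child);
        }
    }

    private static void handleAddressable(final DataTreeCandidateNode node) {
        // Placeholder for application-specific handling.
    }
}
```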
index 270ae504750dad1728dc77860f2238e4f3f76758..b727e5317b087cb7784cb83be78e596dc397698e 100644 (file)
@@ -9,9 +9,12 @@ package org.opendaylight.controller.md.sal.binding.impl;
 
 import com.google.common.base.Function;
 import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableBiMap;
 import java.lang.reflect.Method;
+import java.util.AbstractMap.SimpleEntry;
 import java.util.Iterator;
+import java.util.Map;
 import java.util.Map.Entry;
 import javax.annotation.Nonnull;
 import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizationException;
@@ -19,17 +22,22 @@ import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizat
 import org.opendaylight.controller.md.sal.common.impl.util.compat.DataNormalizer;
 import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTree;
 import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeFactory;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeNode;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingNormalizedNodeSerializer;
 import org.opendaylight.yangtools.binding.data.codec.impl.BindingNormalizedNodeCodecRegistry;
 import org.opendaylight.yangtools.sal.binding.generator.impl.GeneratedClassLoadingStrategy;
 import org.opendaylight.yangtools.sal.binding.generator.util.BindingRuntimeContext;
 import org.opendaylight.yangtools.yang.binding.BindingMapping;
+import org.opendaylight.yangtools.yang.binding.DataContainer;
 import org.opendaylight.yangtools.yang.binding.DataObject;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.Notification;
 import org.opendaylight.yangtools.yang.binding.RpcService;
 import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
 import org.opendaylight.yangtools.yang.common.QNameModule;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.data.impl.codec.DeserializationException;
 import org.opendaylight.yangtools.yang.model.api.Module;
@@ -38,7 +46,7 @@ import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
 import org.opendaylight.yangtools.yang.model.api.SchemaPath;
 
-public class BindingToNormalizedNodeCodec implements BindingCodecTreeFactory, SchemaContextListener, AutoCloseable {
+public class BindingToNormalizedNodeCodec implements BindingCodecTreeFactory, BindingNormalizedNodeSerializer, SchemaContextListener, AutoCloseable {
 
     private final BindingNormalizedNodeCodecRegistry codecRegistry;
     private DataNormalizer legacyToNormalized;
@@ -56,16 +64,52 @@ public class BindingToNormalizedNodeCodec implements BindingCodecTreeFactory, Sc
         return codecRegistry.toYangInstanceIdentifier(binding);
     }
 
-    @SuppressWarnings({ "unchecked", "rawtypes" })
-    public Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> toNormalizedNode(
-            final InstanceIdentifier<? extends DataObject> bindingPath, final DataObject bindingObject) {
-        return codecRegistry.toNormalizedNode((InstanceIdentifier) bindingPath, bindingObject);
+    @Override
+    public YangInstanceIdentifier toYangInstanceIdentifier(final InstanceIdentifier<?> binding) {
+        return codecRegistry.toYangInstanceIdentifier(binding);
+    }
 
+    @Override
+    public <T extends DataObject> Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> toNormalizedNode(
+            final InstanceIdentifier<T> path, final T data) {
+        return codecRegistry.toNormalizedNode(path, data);
     }
 
+    @SuppressWarnings({"unchecked", "rawtypes"})
     public Entry<YangInstanceIdentifier, NormalizedNode<?, ?>> toNormalizedNode(
             final Entry<InstanceIdentifier<? extends DataObject>, DataObject> binding) {
-        return toNormalizedNode(binding.getKey(),binding.getValue());
+        return toNormalizedNode((InstanceIdentifier) binding.getKey(),binding.getValue());
+    }
+
+    @Override
+    public Entry<InstanceIdentifier<?>, DataObject> fromNormalizedNode(final YangInstanceIdentifier path,
+            final NormalizedNode<?, ?> data) {
+        return codecRegistry.fromNormalizedNode(path, data);
+    }
+
+    @Override
+    public Notification fromNormalizedNodeNotification(final SchemaPath path, final ContainerNode data) {
+        return codecRegistry.fromNormalizedNodeNotification(path, data);
+    }
+
+    @Override
+    public DataObject fromNormalizedNodeRpcData(final SchemaPath path, final ContainerNode data) {
+        return codecRegistry.fromNormalizedNodeRpcData(path, data);
+    }
+
+    @Override
+    public InstanceIdentifier<?> fromYangInstanceIdentifier(final YangInstanceIdentifier dom) {
+        return codecRegistry.fromYangInstanceIdentifier(dom);
+    }
+
+    @Override
+    public ContainerNode toNormalizedNodeNotification(final Notification data) {
+        return codecRegistry.toNormalizedNodeNotification(data);
+    }
+
+    @Override
+    public ContainerNode toNormalizedNodeRpcData(final DataContainer data) {
+        return codecRegistry.toNormalizedNodeRpcData(data);
     }
 
     /**
@@ -185,13 +229,27 @@ public class BindingToNormalizedNodeCodec implements BindingCodecTreeFactory, Sc
     }
 
     @Override
-    public BindingCodecTree create(BindingRuntimeContext context) {
+    public BindingCodecTree create(final BindingRuntimeContext context) {
         return codecRegistry.create(context);
     }
 
     @Override
-    public BindingCodecTree create(SchemaContext context, Class<?>... bindingClasses) {
+    public BindingCodecTree create(final SchemaContext context, final Class<?>... bindingClasses) {
         return codecRegistry.create(context, bindingClasses);
     }
 
+    @Nonnull protected Map.Entry<InstanceIdentifier<?>, BindingCodecTreeNode<?>> getSubtreeCodec(
+            final YangInstanceIdentifier domIdentifier) {
+
+        final BindingCodecTree currentCodecTree = codecRegistry.getCodecContext();
+        final InstanceIdentifier<?> bindingPath = codecRegistry.fromYangInstanceIdentifier(domIdentifier);
+        Preconditions.checkArgument(bindingPath != null);
+        /*
+         * If we are able to deserialize the YANG instance identifier, getSubtreeCodec
+         * must return a non-null value.
+         */
+        final BindingCodecTreeNode<?> codecContext = currentCodecTree.getSubtreeCodec(bindingPath);
+        return new SimpleEntry<InstanceIdentifier<?>, BindingCodecTreeNode<?>>(bindingPath, codecContext);
+    }
+
 }
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDataObjectModification.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDataObjectModification.java
new file mode 100644 (file)
index 0000000..a165242
--- /dev/null
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Preconditions;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeNode;
+import org.opendaylight.yangtools.yang.binding.Augmentation;
+import org.opendaylight.yangtools.yang.binding.ChildOf;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.Identifiable;
+import org.opendaylight.yangtools.yang.binding.Identifier;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Lazily translated {@link DataObjectModification} based on {@link DataTreeCandidateNode}.
+ *
+ * {@link LazyDataObjectModification} represents a data tree change event,
+ * but the whole tree is not translated or resolved eagerly; only the child nodes
+ * directly accessed by the user of the data object modification are translated.
+ *
+ * @param <T> Type of Binding Data Object
+ */
+class LazyDataObjectModification<T extends DataObject> implements DataObjectModification<T> {
+
+    private static final Logger LOG = LoggerFactory.getLogger(LazyDataObjectModification.class);
+
+    private final BindingCodecTreeNode<T> codec;
+    private final DataTreeCandidateNode domData;
+    private final PathArgument identifier;
+    private Collection<DataObjectModification<? extends DataObject>> childNodesCache;
+
+    private LazyDataObjectModification(final BindingCodecTreeNode<T> codec, final DataTreeCandidateNode domData) {
+        this.codec = Preconditions.checkNotNull(codec);
+        this.domData = Preconditions.checkNotNull(domData);
+        this.identifier = codec.deserializePathArgument(domData.getIdentifier());
+    }
+
+    static <T extends DataObject> DataObjectModification<T> create(final BindingCodecTreeNode<T> codec,
+            final DataTreeCandidateNode domData) {
+        return new LazyDataObjectModification<>(codec,domData);
+    }
+
+    static Collection<DataObjectModification<? extends DataObject>> from(final BindingCodecTreeNode<?> parentCodec,
+            final Collection<DataTreeCandidateNode> domChildNodes) {
+        final ArrayList<DataObjectModification<? extends DataObject>> result = new ArrayList<>(domChildNodes.size());
+        populateList(result, parentCodec, domChildNodes);
+        return result;
+    }
+
+    private static void populateList(final List<DataObjectModification<? extends DataObject>> result,
+            final BindingCodecTreeNode<?> parentCodec, final Collection<DataTreeCandidateNode> domChildNodes) {
+        for (final DataTreeCandidateNode domChildNode : domChildNodes) {
+            final BindingStructuralType type = BindingStructuralType.from(domChildNode);
+            if (type != BindingStructuralType.NOT_ADDRESSABLE) {
+                /*
+                 * Even if the type is UNKNOWN from the perspective of BindingStructuralType,
+                 * we still try to load a codec for it. The type is then used to make the
+                 * debug log more specific.
+                 */
+                try {
+                    final BindingCodecTreeNode<?> childCodec =
+                            parentCodec.yangPathArgumentChild(domChildNode.getIdentifier());
+                    populateList(result,type, childCodec, domChildNode);
+                } catch (final IllegalArgumentException e) {
+                    if(type == BindingStructuralType.UNKNOWN) {
+                        LOG.debug("Unable to deserialize unknown DOM node {}",domChildNode,e);
+                    } else {
+                        LOG.debug("Binding representation for DOM node {} was not found",domChildNode,e);
+                    }
+                }
+            }
+        }
+    }
+
+
+    private static void populateList(final List<DataObjectModification<? extends DataObject>> result,
+            final BindingStructuralType type, final BindingCodecTreeNode<?> childCodec,
+            final DataTreeCandidateNode domChildNode) {
+        switch (type) {
+            case INVISIBLE_LIST:
+                // We use parent codec intentionally.
+                populateListWithSingleCodec(result, childCodec, domChildNode.getChildNodes());
+                break;
+            case INVISIBLE_CONTAINER:
+                populateList(result, childCodec, domChildNode.getChildNodes());
+                break;
+            case UNKNOWN:
+            case VISIBLE_CONTAINER:
+                result.add(create(childCodec, domChildNode));
+                break;
+            default:
+                break;
+        }
+    }
+
+    private static void populateListWithSingleCodec(final List<DataObjectModification<? extends DataObject>> result,
+            final BindingCodecTreeNode<?> codec, final Collection<DataTreeCandidateNode> childNodes) {
+        for (final DataTreeCandidateNode child : childNodes) {
+            result.add(create(codec, child));
+        }
+    }
+
+    @Override
+    public T getDataAfter() {
+        return deserialize(domData.getDataAfter());
+    }
+
+    @Override
+    public Class<T> getDataType() {
+        return codec.getBindingClass();
+    }
+
+    @Override
+    public PathArgument getIdentifier() {
+        return identifier;
+    }
+
+    @Override
+    public DataObjectModification.ModificationType getModificationType() {
+        switch(domData.getModificationType()) {
+            case WRITE:
+                return DataObjectModification.ModificationType.WRITE;
+            case SUBTREE_MODIFIED:
+                return DataObjectModification.ModificationType.SUBTREE_MODIFIED;
+            case DELETE:
+                return DataObjectModification.ModificationType.DELETE;
+
+            default:
+                // TODO: Should we lie about the modification type instead of throwing an exception?
+                throw new IllegalStateException("Unsupported DOM Modification type " + domData.getModificationType());
+        }
+    }
+
+    @Override
+    public Collection<DataObjectModification<? extends DataObject>> getModifiedChildren() {
+        if(childNodesCache == null) {
+            childNodesCache = from(codec,domData.getChildNodes());
+        }
+        return childNodesCache;
+    }
+
+    @Override
+    public DataObjectModification<? extends DataObject> getModifiedChild(final PathArgument arg) {
+        final List<YangInstanceIdentifier.PathArgument> domArgumentList = new ArrayList<>();
+        final BindingCodecTreeNode<?> childCodec = codec.bindingPathArgumentChild(arg, domArgumentList);
+        final Iterator<YangInstanceIdentifier.PathArgument> toEnter = domArgumentList.iterator();
+        DataTreeCandidateNode current = domData;
+        while (toEnter.hasNext() && current != null) {
+            current = current.getModifiedChild(toEnter.next());
+        }
+        if (current != null) {
+            return create(childCodec, current);
+        }
+        return null;
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public <C extends Identifiable<K> & ChildOf<? super T>, K extends Identifier<C>> DataObjectModification<C> getModifiedChildListItem(
+            final Class<C> listItem, final K listKey) {
+        return (DataObjectModification<C>) getModifiedChild(new InstanceIdentifier.IdentifiableItem<>(listItem, listKey));
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public <C extends ChildOf<? super T>> DataObjectModification<C> getModifiedChildContainer(final Class<C> arg) {
+        return (DataObjectModification<C>) getModifiedChild(new InstanceIdentifier.Item<>(arg));
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public <C extends Augmentation<T> & DataObject> DataObjectModification<C> getModifiedAugmentation(
+            final Class<C> augmentation) {
+        return (DataObjectModification<C>) getModifiedChild(new InstanceIdentifier.Item<>(augmentation));
+    }
+
+    private T deserialize(final Optional<NormalizedNode<?, ?>> dataAfter) {
+        if(dataAfter.isPresent()) {
+            return codec.deserialize(dataAfter.get());
+        }
+        return null;
+    }
+}
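
A hedged traversal sketch: because the translation is lazy, only the children a consumer actually visits are deserialized. The walker class is illustrative; it uses Guava's Strings.repeat for indentation.

```java
import com.google.common.base.Strings;
import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
import org.opendaylight.yangtools.yang.binding.DataObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

final class ModificationWalker {
    private static final Logger LOG = LoggerFactory.getLogger(ModificationWalker.class);

    static void walk(final DataObjectModification<? extends DataObject> node, final int depth) {
        LOG.debug("{}{} {} -> {}", Strings.repeat("  ", depth), node.getModificationType(),
                node.getDataType().getSimpleName(), node.getIdentifier());
        // Each call to getModifiedChildren() translates only the immediate children.
        for (final DataObjectModification<? extends DataObject> child : node.getModifiedChildren()) {
            walk(child, depth + 1);
        }
    }
}
```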
diff --git a/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDataTreeModification.java b/opendaylight/md-sal/sal-binding-broker/src/main/java/org/opendaylight/controller/md/sal/binding/impl/LazyDataTreeModification.java
new file mode 100644 (file)
index 0000000..2a90f96
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.md.sal.binding.impl;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map.Entry;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeNode;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Lazily translated {@link DataTreeModification} based on {@link DataTreeCandidate}.
+ *
+ * {@link DataTreeModification} represents a data tree change event,
+ * but the whole tree is not translated or resolved eagerly; only the child nodes
+ * directly accessed by the user of the data tree modification are translated.
+ *
+ */
+class LazyDataTreeModification<T extends DataObject> implements DataTreeModification<T> {
+
+    private final DataTreeIdentifier<T> path;
+    private final DataObjectModification<T> rootNode;
+
+    LazyDataTreeModification(final LogicalDatastoreType datastoreType, final InstanceIdentifier<T> path, final BindingCodecTreeNode<T> codec, final DataTreeCandidate domChange) {
+        this.path = new DataTreeIdentifier<>(datastoreType, path);
+        this.rootNode = LazyDataObjectModification.create(codec, domChange.getRootNode());
+    }
+
+    @Override
+    public DataObjectModification<T> getRootNode() {
+        return rootNode;
+    }
+
+    @Override
+    public DataTreeIdentifier<T> getRootPath() {
+        return path;
+    }
+
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    static <T extends DataObject> DataTreeModification<T> create(final BindingToNormalizedNodeCodec codec, final DataTreeCandidate domChange,
+            final LogicalDatastoreType datastoreType) {
+        final Entry<InstanceIdentifier<?>, BindingCodecTreeNode<?>> codecCtx =
+                codec.getSubtreeCodec(domChange.getRootPath());
+        return (DataTreeModification<T>) new LazyDataTreeModification(datastoreType, codecCtx.getKey(), codecCtx.getValue(), domChange);
+    }
+
+    static <T extends DataObject> Collection<DataTreeModification<T>> from(final BindingToNormalizedNodeCodec codec,
+            final Collection<DataTreeCandidate> domChanges, final LogicalDatastoreType datastoreType) {
+        final List<DataTreeModification<T>> result = new ArrayList<>(domChanges.size());
+        for (final DataTreeCandidate domChange : domChanges) {
+            result.add(LazyDataTreeModification.<T>create(codec, domChange, datastoreType));
+        }
+        return result;
+    }
+
+}
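
A hedged sketch of consuming these lazily translated events for a wildcarded registration (compare testWildcardedListListener in the test added below): each event's root path identifies the concrete list entry that changed. The tracker class itself is an assumption of this example.

```java
import java.util.Collection;
import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;

final class TopLevelListTracker implements DataTreeChangeListener<TopLevelList> {
    @Override
    public void onDataTreeChanged(final Collection<DataTreeModification<TopLevelList>> changes) {
        for (final DataTreeModification<TopLevelList> change : changes) {
            final InstanceIdentifier<TopLevelList> entryPath = change.getRootPath().getRootIdentifier();
            final DataObjectModification<TopLevelList> entry = change.getRootNode();
            if (entry.getModificationType() == DataObjectModification.ModificationType.DELETE) {
                // The entry at entryPath was removed; getDataAfter() would return null here.
            } else {
                final TopLevelList after = entry.getDataAfter();
                // Process the created or updated entry identified by entryPath.
            }
        }
    }
}
```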
index e15cb833855647231fc2fda8a6ef83535b76e436..ee130fdeeb87502a8a15780264e0dbfd0d255280 100644 (file)
@@ -70,6 +70,7 @@ module opendaylight-sal-binding-broker-impl {
         base config:module-type;
         config:provided-service binding-dom-mapping-service;
         config:provided-service sal:binding-codec-tree-factory;
+        config:provided-service sal:binding-normalized-node-serializer;
         config:java-name-prefix RuntimeMapping;
     }
 
diff --git a/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/DataTreeChangeListenerTest.java b/opendaylight/md-sal/sal-binding-broker/src/test/java/org/opendaylight/controller/md/sal/binding/impl/test/DataTreeChangeListenerTest.java
new file mode 100644 (file)
index 0000000..888a628
--- /dev/null
@@ -0,0 +1,163 @@
+package org.opendaylight.controller.md.sal.binding.impl.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_BAR_KEY;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.TOP_FOO_KEY;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.USES_ONE_KEY;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.complexUsesAugment;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.path;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.top;
+import static org.opendaylight.controller.md.sal.test.model.util.ListsBindingUtils.topLevelList;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.util.concurrent.SettableFuture;
+import java.util.Collection;
+import java.util.concurrent.TimeUnit;
+import org.junit.Test;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification;
+import org.opendaylight.controller.md.sal.binding.api.DataObjectModification.ModificationType;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeChangeListener;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeIdentifier;
+import org.opendaylight.controller.md.sal.binding.api.DataTreeModification;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.binding.impl.BindingDOMDataBrokerAdapter;
+import org.opendaylight.controller.md.sal.binding.test.AbstractDataBrokerTest;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.augment.rev140709.TreeComplexUsesAugment;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.Top;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.TwoLevelList;
+import org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.controller.md.sal.test.list.rev140701.two.level.list.TopLevelList;
+import org.opendaylight.yangtools.yang.binding.DataObject;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
+import org.opendaylight.yangtools.yang.binding.InstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.binding.YangModuleInfo;
+import org.opendaylight.yangtools.yang.binding.util.BindingReflections;
+
+public class DataTreeChangeListenerTest extends AbstractDataBrokerTest {
+
+    private static final InstanceIdentifier<Top> TOP_PATH = InstanceIdentifier.create(Top.class);
+    private static final PathArgument TOP_ARGUMENT = TOP_PATH.getPathArguments().iterator().next();
+    private static final InstanceIdentifier<TopLevelList> FOO_PATH = path(TOP_FOO_KEY);
+    private static final PathArgument FOO_ARGUMENT = Iterables.getLast(FOO_PATH.getPathArguments());
+    private static final TopLevelList FOO_DATA = topLevelList(TOP_FOO_KEY, complexUsesAugment(USES_ONE_KEY));
+    private static final InstanceIdentifier<TopLevelList> BAR_PATH = path(TOP_BAR_KEY);
+    private static final PathArgument BAR_ARGUMENT = Iterables.getLast(BAR_PATH.getPathArguments());
+    private static final TopLevelList BAR_DATA = topLevelList(TOP_BAR_KEY);
+    private static final DataTreeIdentifier<Top> TOP_IDENTIFIER = new DataTreeIdentifier<Top>(LogicalDatastoreType.OPERATIONAL,
+            TOP_PATH);
+
+    private static final Top TOP_INITIAL_DATA = top(FOO_DATA);
+
+    private BindingDOMDataBrokerAdapter dataBrokerImpl;
+
+    private static final class EventCapturingListener<T extends DataObject> implements DataTreeChangeListener<T> {
+
+        private SettableFuture<Collection<DataTreeModification<T>>> changes = SettableFuture.create();
+
+        @Override
+        public void onDataTreeChanged(final Collection<DataTreeModification<T>> changes) {
+            this.changes.set(changes);
+
+        }
+
+        Collection<DataTreeModification<T>> nextEvent() throws Exception {
+            final Collection<DataTreeModification<T>> result = changes.get(200,TimeUnit.MILLISECONDS);
+            changes = SettableFuture.create();
+            return result;
+        }
+
+    }
+
+    @Override
+    protected Iterable<YangModuleInfo> getModuleInfos() throws Exception {
+        return ImmutableSet.of(
+                BindingReflections.getModuleInfo(TwoLevelList.class),
+                BindingReflections.getModuleInfo(TreeComplexUsesAugment.class)
+                );
+    }
+
+    @Override
+    protected void setupWithDataBroker(final DataBroker dataBroker) {
+        dataBrokerImpl = (BindingDOMDataBrokerAdapter) dataBroker;
+    }
+
+    @Test
+    public void testTopLevelListener() throws Exception {
+        final EventCapturingListener<Top> listener = new EventCapturingListener<>();
+        dataBrokerImpl.registerDataTreeChangeListener(TOP_IDENTIFIER, listener);
+
+        createAndVerifyTop(listener);
+
+        putTx(BAR_PATH, BAR_DATA).submit().checkedGet();
+        final DataObjectModification<Top> afterBarPutEvent = Iterables.getOnlyElement(listener.nextEvent()).getRootNode();
+        verifyModification(afterBarPutEvent, TOP_ARGUMENT, ModificationType.SUBTREE_MODIFIED);
+        final DataObjectModification<TopLevelList> barPutMod = afterBarPutEvent.getModifiedChildListItem(TopLevelList.class, TOP_BAR_KEY);
+        assertNotNull(barPutMod);
+        verifyModification(barPutMod, BAR_ARGUMENT, ModificationType.WRITE);
+
+        deleteTx(BAR_PATH).submit().checkedGet();
+        final DataObjectModification<Top> afterBarDeleteEvent = Iterables.getOnlyElement(listener.nextEvent()).getRootNode();
+        verifyModification(afterBarDeleteEvent, TOP_ARGUMENT, ModificationType.SUBTREE_MODIFIED);
+        final DataObjectModification<TopLevelList> barDeleteMod = afterBarDeleteEvent.getModifiedChildListItem(TopLevelList.class, TOP_BAR_KEY);
+        verifyModification(barDeleteMod, BAR_ARGUMENT, ModificationType.DELETE);
+    }
+
+    @Test
+    public void testWildcardedListListener() throws Exception {
+        final EventCapturingListener<TopLevelList> listener = new EventCapturingListener<>();
+        final DataTreeIdentifier<TopLevelList> wildcard = new DataTreeIdentifier<>(LogicalDatastoreType.OPERATIONAL, TOP_PATH.child(TopLevelList.class));
+        dataBrokerImpl.registerDataTreeChangeListener(wildcard, listener);
+
+        putTx(TOP_PATH, TOP_INITIAL_DATA).submit().checkedGet();
+
+        final DataTreeModification<TopLevelList> fooWriteEvent = Iterables.getOnlyElement(listener.nextEvent());
+        assertEquals(FOO_PATH, fooWriteEvent.getRootPath().getRootIdentifier());
+        verifyModification(fooWriteEvent.getRootNode(), FOO_ARGUMENT, ModificationType.WRITE);
+
+        putTx(BAR_PATH, BAR_DATA).submit().checkedGet();
+        final DataTreeModification<TopLevelList> barWriteEvent = Iterables.getOnlyElement(listener.nextEvent());
+        assertEquals(BAR_PATH, barWriteEvent.getRootPath().getRootIdentifier());
+        verifyModification(barWriteEvent.getRootNode(), BAR_ARGUMENT, ModificationType.WRITE);
+
+        deleteTx(BAR_PATH).submit().checkedGet();
+        final DataTreeModification<TopLevelList> barDeleteEvent = Iterables.getOnlyElement(listener.nextEvent());
+        assertEquals(BAR_PATH, barDeleteEvent.getRootPath().getRootIdentifier());
+        verifyModification(barDeleteEvent.getRootNode(), BAR_ARGUMENT, ModificationType.DELETE);
+    }
+
+    private void createAndVerifyTop(final EventCapturingListener<Top> listener) throws Exception {
+        putTx(TOP_PATH,TOP_INITIAL_DATA).submit().checkedGet();
+        final Collection<DataTreeModification<Top>> events = listener.nextEvent();
+
+        assertFalse("Non empty collection should be received.",events.isEmpty());
+        final DataTreeModification<Top> initialWrite = Iterables.getOnlyElement(events);
+        final DataObjectModification<? extends DataObject> initialNode = initialWrite.getRootNode();
+        verifyModification(initialNode,TOP_PATH.getPathArguments().iterator().next(),ModificationType.WRITE);
+        assertEquals(TOP_INITIAL_DATA, initialNode.getDataAfter());
+    }
+
+    private void verifyModification(final DataObjectModification<? extends DataObject> barWrite, final PathArgument pathArg,
+            final ModificationType eventType) {
+        assertEquals(pathArg.getType(), barWrite.getDataType());
+        assertEquals(eventType,barWrite.getModificationType());
+        assertEquals(pathArg, barWrite.getIdentifier());
+    }
+
+    private <T extends DataObject> WriteTransaction putTx(final InstanceIdentifier<T> path,final T data) {
+        final WriteTransaction tx = dataBrokerImpl.newWriteOnlyTransaction();
+        tx.put(LogicalDatastoreType.OPERATIONAL, path, data);
+        return tx;
+    }
+
+    private WriteTransaction deleteTx(final InstanceIdentifier<?> path) {
+        final WriteTransaction tx = dataBrokerImpl.newWriteOnlyTransaction();
+        tx.delete(LogicalDatastoreType.OPERATIONAL, path);
+        return tx;
+    }
+}
index 81508d1b8fc93483277f31abf23cdaff50547397..18a94dfacded8d6523f4195c6b44f71842c5be34 100644 (file)
@@ -48,6 +48,11 @@ module opendaylight-md-sal-binding {
         config:java-class "org.opendaylight.yangtools.binding.data.codec.api.BindingCodecTreeFactory";
     }
 
+    identity binding-normalized-node-serializer {
+        base "config:service-type";
+        config:java-class "org.opendaylight.yangtools.binding.data.codec.api.BindingNormalizedNodeSerializer";
+    }
+
     identity binding-notification-subscription-service {
         base "config:service-type";
         config:java-class "org.opendaylight.controller.sal.binding.api.NotificationService";
index 9f5bdd439efb102a751e1f64b5ca59986e63c688..1f163727f5c8df3945c9e63f719928e39af38767 100644 (file)
@@ -37,7 +37,8 @@ public class TestHelper {
                 mavenBundle(CONTROLLER, "sal-common-impl").versionAsInProject(), // //
 
                 mavenBundle("org.apache.commons", "commons-lang3").versionAsInProject(), //
-                mavenBundle("com.google.guava", "guava").versionAsInProject()
+                mavenBundle("com.google.guava", "guava").versionAsInProject(),
+                mavenBundle("com.github.romix", "java-concurrent-hash-trie-map").versionAsInProject()
         );
     }
 
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DelegatingPersistentDataProvider.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/DelegatingPersistentDataProvider.java
new file mode 100644 (file)
index 0000000..c74236b
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+
+/**
+ * A DataPersistenceProvider implementation that delegates to another implementation.
+ *
+ * @author Thomas Pantelis
+ */
+public class DelegatingPersistentDataProvider implements DataPersistenceProvider {
+    private DataPersistenceProvider delegate;
+
+    public DelegatingPersistentDataProvider(DataPersistenceProvider delegate) {
+        this.delegate = delegate;
+    }
+
+    public void setDelegate(DataPersistenceProvider delegate) {
+        this.delegate = delegate;
+    }
+
+    public DataPersistenceProvider getDelegate() {
+        return delegate;
+    }
+
+    @Override
+    public boolean isRecoveryApplicable() {
+        return delegate.isRecoveryApplicable();
+    }
+
+    @Override
+    public <T> void persist(T o, Procedure<T> procedure) {
+        delegate.persist(o, procedure);
+    }
+
+    @Override
+    public void saveSnapshot(Object o) {
+        delegate.saveSnapshot(o);
+    }
+
+    @Override
+    public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+        delegate.deleteSnapshots(criteria);
+    }
+
+    @Override
+    public void deleteMessages(long sequenceNumber) {
+        delegate.deleteMessages(sequenceNumber);
+    }
+}
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/NonPersistentDataProvider.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/NonPersistentDataProvider.java
new file mode 100644 (file)
index 0000000..fed8117
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A DataPersistenceProvider implementation with persistence disabled, essentially a no-op.
+ */
+public class NonPersistentDataProvider implements DataPersistenceProvider {
+    private static final Logger LOG = LoggerFactory.getLogger(NonPersistentDataProvider.class);
+
+    @Override
+    public boolean isRecoveryApplicable() {
+        return false;
+    }
+
+    @Override
+    public <T> void persist(T o, Procedure<T> procedure) {
+        try {
+            procedure.apply(o);
+        } catch (Exception e) {
+            LOG.error("An unexpected error occurred", e);
+        }
+    }
+
+    @Override
+    public void saveSnapshot(Object o) {
+    }
+
+    @Override
+    public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+    }
+
+    @Override
+    public void deleteMessages(long sequenceNumber) {
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/PersistentDataProvider.java b/opendaylight/md-sal/sal-clustering-commons/src/main/java/org/opendaylight/controller/cluster/PersistentDataProvider.java
new file mode 100644 (file)
index 0000000..f130a1f
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster;
+
+import akka.japi.Procedure;
+import akka.persistence.SnapshotSelectionCriteria;
+import akka.persistence.UntypedPersistentActor;
+import com.google.common.base.Preconditions;
+
+/**
+ * A DataPersistenceProvider implementation with persistence enabled.
+ */
+public class PersistentDataProvider implements DataPersistenceProvider {
+
+    private final UntypedPersistentActor persistentActor;
+
+    public PersistentDataProvider(UntypedPersistentActor persistentActor) {
+        this.persistentActor = Preconditions.checkNotNull(persistentActor, "persistentActor can't be null");
+    }
+
+    @Override
+    public boolean isRecoveryApplicable() {
+        return true;
+    }
+
+    @Override
+    public <T> void persist(T o, Procedure<T> procedure) {
+        persistentActor.persist(o, procedure);
+    }
+
+    @Override
+    public void saveSnapshot(Object o) {
+        persistentActor.saveSnapshot(o);
+    }
+
+    @Override
+    public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
+        persistentActor.deleteSnapshots(criteria);
+    }
+
+    @Override
+    public void deleteMessages(long sequenceNumber) {
+        persistentActor.deleteMessages(sequenceNumber);
+    }
+}
\ No newline at end of file
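
A hedged composition sketch for the three providers above: the delegating provider lets an actor flip persistence on or off at runtime without handing out a new reference. The actor parameter and the persistenceEnabled flag are assumptions of this example.

```java
import akka.persistence.UntypedPersistentActor;
import org.opendaylight.controller.cluster.DataPersistenceProvider;
import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider;
import org.opendaylight.controller.cluster.NonPersistentDataProvider;
import org.opendaylight.controller.cluster.PersistentDataProvider;

final class PersistenceProviderSketch {
    static DataPersistenceProvider createProvider(final UntypedPersistentActor actor,
            final boolean persistenceEnabled) {
        final DelegatingPersistentDataProvider provider =
                new DelegatingPersistentDataProvider(new NonPersistentDataProvider());
        if (persistenceEnabled) {
            // Switching the delegate is all that is needed; callers keep the same provider instance.
            provider.setDelegate(new PersistentDataProvider(actor));
        }
        return provider;
    }
}
```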
index 432c2d5615227d7d67f856bef6215fd550053084..326733f377e21d62fcf4865a0eeab10ad14e7ad6 100644 (file)
@@ -8,10 +8,7 @@
 
 package org.opendaylight.controller.cluster.common.actor;
 
-import akka.japi.Procedure;
-import akka.persistence.SnapshotSelectionCriteria;
 import akka.persistence.UntypedPersistentActor;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -69,71 +66,4 @@ public abstract class AbstractUntypedPersistentActor extends UntypedPersistentAc
         }
         unhandled(message);
     }
-
-    protected class PersistentDataProvider implements DataPersistenceProvider {
-
-        public PersistentDataProvider(){
-
-        }
-
-        @Override
-        public boolean isRecoveryApplicable() {
-            return true;
-        }
-
-        @Override
-        public <T> void persist(T o, Procedure<T> procedure) {
-            AbstractUntypedPersistentActor.this.persist(o, procedure);
-        }
-
-        @Override
-        public void saveSnapshot(Object o) {
-            AbstractUntypedPersistentActor.this.saveSnapshot(o);
-        }
-
-        @Override
-        public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
-            AbstractUntypedPersistentActor.this.deleteSnapshots(criteria);
-        }
-
-        @Override
-        public void deleteMessages(long sequenceNumber) {
-            AbstractUntypedPersistentActor.this.deleteMessages(sequenceNumber);
-        }
-    }
-
-    protected class NonPersistentDataProvider implements DataPersistenceProvider {
-
-        public NonPersistentDataProvider(){
-
-        }
-
-        @Override
-        public boolean isRecoveryApplicable() {
-            return false;
-        }
-
-        @Override
-        public <T> void persist(T o, Procedure<T> procedure) {
-            try {
-                procedure.apply(o);
-            } catch (Exception e) {
-                LOG.error("An unexpected error occurred", e);
-            }
-        }
-
-        @Override
-        public void saveSnapshot(Object o) {
-        }
-
-        @Override
-        public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
-
-        }
-
-        @Override
-        public void deleteMessages(long sequenceNumber) {
-
-        }
-    }
 }
index 9b4560c72623eb04d8014cb705a60a87dea87d57..2a6aac4d79d95ec603591fa84865d0517c35d84d 100644 (file)
@@ -26,9 +26,9 @@ public class MeteredBoundedMailbox implements MailboxType, ProducesMessageQueue<
     private final Logger LOG = LoggerFactory.getLogger(MeteredBoundedMailbox.class);
 
     private MeteredMessageQueue queue;
-    private Integer capacity;
-    private FiniteDuration pushTimeOut;
-    private MetricRegistry registry;
+    private final Integer capacity;
+    private final FiniteDuration pushTimeOut;
+    private final MetricRegistry registry;
 
     private final String QUEUE_SIZE = "q-size";
 
@@ -38,7 +38,7 @@ public class MeteredBoundedMailbox implements MailboxType, ProducesMessageQueue<
         this.capacity = commonConfig.getMailBoxCapacity();
         this.pushTimeOut = commonConfig.getMailBoxPushTimeout();
 
-        MetricsReporter reporter = MetricsReporter.getInstance();
+        MetricsReporter reporter = MetricsReporter.getInstance(MeteringBehavior.DOMAIN);
         registry = reporter.getMetricsRegistry();
     }
 
@@ -58,7 +58,9 @@ public class MeteredBoundedMailbox implements MailboxType, ProducesMessageQueue<
         String metricName = MetricRegistry.name(actorName, QUEUE_SIZE);
 
         if (registry.getMetrics().containsKey(metricName))
+        {
             return; //already registered
+        }
 
         Gauge<Integer> queueSize = getQueueSizeGuage(monitoredQueue);
         registerQueueSizeMetric(metricName, queueSize);
index 9ff185a61e27d61c335223d8b1150fa3738a41c6..c04e2e93402ad12915828812d4365016fabbda51 100644 (file)
@@ -26,10 +26,11 @@ import org.opendaylight.controller.cluster.reporting.MetricsReporter;
  * The information is reported to {@link org.opendaylight.controller.cluster.reporting.MetricsReporter}
  */
 public class MeteringBehavior implements Procedure<Object> {
+    public static final String DOMAIN = "org.opendaylight.controller.actor.metric";
 
     private final UntypedActor meteredActor;
 
-    private final MetricRegistry METRICREGISTRY = MetricsReporter.getInstance().getMetricsRegistry();
+    private final MetricRegistry METRICREGISTRY = MetricsReporter.getInstance(DOMAIN).getMetricsRegistry();
     private final String MSG_PROCESSING_RATE = "msg-rate";
 
     private String actorQualifiedName;
index d4aab036be21df1734f4f88bc590b35030a71d21..055ccfe0ceeeed6ee101b60f4dcda6d0a2a83a9d 100644 (file)
@@ -66,7 +66,7 @@ public class NormalizedNodeOutputStreamWriter implements NormalizedNodeStreamWri
         output = new DataOutputStream(stream);
     }
 
-    public NormalizedNodeOutputStreamWriter(DataOutput output) throws IOException {
+    public NormalizedNodeOutputStreamWriter(DataOutput output) {
         this.output = Preconditions.checkNotNull(output);
     }
 
index b400fcab7d93fac0aa67fa6a7a1bd1862b4a6888..9b93744368f486de2611306e2540df913219f6c9 100644 (file)
@@ -9,6 +9,9 @@ package org.opendaylight.controller.cluster.reporting;
 
 import com.codahale.metrics.JmxReporter;
 import com.codahale.metrics.MetricRegistry;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
 
 /**
  * Maintains metrics registry that is provided to reporters.
@@ -20,26 +23,36 @@ import com.codahale.metrics.MetricRegistry;
  */
 public class MetricsReporter implements AutoCloseable {
 
-    private static final MetricRegistry METRICS_REGISTRY = new MetricRegistry();
-    private static final String DOMAIN = "org.opendaylight.controller.actor.metric";
-    private static final MetricsReporter INSTANCE = new MetricsReporter();
-
-    private final JmxReporter jmxReporter = JmxReporter.forRegistry(METRICS_REGISTRY).inDomain(DOMAIN).build();
-
-    private MetricsReporter() {
+    private static LoadingCache<String, MetricsReporter> METRIC_REPORTERS = CacheBuilder.newBuilder().build(
+            new CacheLoader<String, MetricsReporter>() {
+                @Override
+                public MetricsReporter load(String domainName) {
+                    return new MetricsReporter(domainName);
+                }
+            });
+
+    private final String domainName;
+    private final JmxReporter jmxReporter;
+    private final MetricRegistry metricRegistry = new MetricRegistry();
+
+    private MetricsReporter(String domainName) {
+        this.domainName = domainName;
+        jmxReporter = JmxReporter.forRegistry(metricRegistry).inDomain(domainName).build();
         jmxReporter.start();
     }
 
-    public static MetricsReporter getInstance() {
-        return INSTANCE;
+    public static MetricsReporter getInstance(String domainName) {
+        return METRIC_REPORTERS.getUnchecked(domainName);
     }
 
     public MetricRegistry getMetricsRegistry() {
-        return METRICS_REGISTRY;
+        return metricRegistry;
     }
 
     @Override
     public void close() {
         jmxReporter.close();
+
+        METRIC_REPORTERS.invalidate(domainName);
     }
 }
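
With this change each domain gets its own reporter and MetricRegistry, published over JMX under that domain. A hedged usage sketch: the domain string matches the MeteringBehavior.DOMAIN value introduced above, and the meter name matches its MSG_PROCESSING_RATE constant; the wrapper class is illustrative.

```java
import com.codahale.metrics.MetricRegistry;
import org.opendaylight.controller.cluster.reporting.MetricsReporter;

final class ReporterSketch {
    static void recordMessage() {
        // Same domain string that MeteringBehavior.DOMAIN carries in this change.
        final MetricsReporter reporter = MetricsReporter.getInstance("org.opendaylight.controller.actor.metric");
        final MetricRegistry registry = reporter.getMetricsRegistry();
        registry.meter("msg-rate").mark();
        // reporter.close() would stop the JMX reporter and evict this domain's cached instance.
    }
}
```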
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/AbstractThreePhaseCommitCohort.java
new file mode 100644 (file)
index 0000000..cac0f51
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import java.util.List;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import scala.concurrent.Future;
+
+/**
+ * Abstract base class for {@link DOMStoreThreePhaseCommitCohort} instances returned by this
+ * implementation. In addition to the usual set of methods it also contains the list of actor
+ * futures.
+ */
+abstract class AbstractThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
+    abstract List<Future<ActorSelection>> getCohortFutures();
+}
index 933e87ace2588388a624783960788a7a3c01bbd5..d94e1c691e704051a81f74c2ba3ec135e1da002e 100644 (file)
@@ -7,22 +7,40 @@
  */
 package org.opendaylight.controller.cluster.datastore;
 
-import com.google.common.collect.Lists;
+import com.google.common.collect.ImmutableList;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
 import scala.concurrent.Future;
 
 abstract class AbstractTransactionContext implements TransactionContext {
 
-    protected final TransactionIdentifier identifier;
-    protected final List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
+    private final List<Future<Object>> recordedOperationFutures = new ArrayList<>();
+    private final TransactionIdentifier identifier;
 
-    AbstractTransactionContext(TransactionIdentifier identifier) {
+    protected AbstractTransactionContext(TransactionIdentifier identifier) {
         this.identifier = identifier;
     }
 
     @Override
-    public List<Future<Object>> getRecordedOperationFutures() {
-        return recordedOperationFutures;
+    public final void copyRecordedOperationFutures(Collection<Future<Object>> target) {
+        target.addAll(recordedOperationFutures);
     }
-}
\ No newline at end of file
+
+    protected final TransactionIdentifier getIdentifier() {
+        return identifier;
+    }
+
+    protected final Collection<Future<Object>> copyRecordedOperationFutures() {
+        return ImmutableList.copyOf(recordedOperationFutures);
+    }
+
+    protected final int recordedOperationCount() {
+        return recordedOperationFutures.size();
+    }
+
+    protected final void recordOperationFuture(Future<Object> future) {
+        recordedOperationFutures.add(future);
+    }
+}
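
The hunk above hides the previously protected futures list behind final accessors. A simplified, self-contained analogue of that pattern (not the actual class, just its shape):

import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

// Subclasses append through a narrow protected method; callers only ever see copies.
abstract class RecordedFutures<F> {
    private final List<F> recorded = new ArrayList<>();

    protected final void record(F future) {
        recorded.add(future);
    }

    protected final int recordedCount() {
        return recorded.size();
    }

    protected final Collection<F> snapshot() {
        return ImmutableList.copyOf(recorded);
    }

    public final void copyTo(Collection<F> target) {
        target.addAll(recorded);
    }
}
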
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ChainedTransactionProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ChainedTransactionProxy.java
new file mode 100644 (file)
index 0000000..ed3aa85
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import akka.dispatch.OnComplete;
+import java.util.List;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+import scala.concurrent.Promise;
+
+final class ChainedTransactionProxy extends TransactionProxy {
+    private static final Logger LOG = LoggerFactory.getLogger(ChainedTransactionProxy.class);
+
+    /**
+     * Stores the ready Futures from the previous Tx in the chain.
+     */
+    private final List<Future<ActorSelection>> previousReadyFutures;
+
+    /**
+     * Stores the ready Futures from this transaction when it is readied.
+     */
+    private volatile List<Future<ActorSelection>> readyFutures;
+
+    ChainedTransactionProxy(ActorContext actorContext, TransactionType transactionType,
+            String transactionChainId, List<Future<ActorSelection>> previousReadyFutures) {
+        super(actorContext, transactionType, transactionChainId);
+        this.previousReadyFutures = previousReadyFutures;
+    }
+
+    List<Future<ActorSelection>> getReadyFutures() {
+        return readyFutures;
+    }
+
+    boolean isReady() {
+        return readyFutures != null;
+    }
+
+    @Override
+    public AbstractThreePhaseCommitCohort ready() {
+        final AbstractThreePhaseCommitCohort ret = super.ready();
+        readyFutures = ret.getCohortFutures();
+        LOG.debug("onTransactionReady {} pending readyFutures size {} chain {}", getIdentifier(),
+            readyFutures.size(), getTransactionChainId());
+        return ret;
+    }
+
+    /**
+     * This method is overridden to ensure the previous Tx's ready operations complete
+     * before we initiate the next Tx in the chain; otherwise creation of the next Tx
+     * could fail while the previous Tx's ready operations are still in flight.
+     */
+    @Override
+    protected Future<ActorSelection> sendFindPrimaryShardAsync(final String shardName) {
+        // Check if there are any previous ready Futures, otherwise let the super class handle it.
+        if(previousReadyFutures.isEmpty()) {
+            return super.sendFindPrimaryShardAsync(shardName);
+        }
+
+        if (LOG.isDebugEnabled()) {
+            LOG.debug("Waiting for {} previous ready futures for Tx {} on chain {}",
+                    previousReadyFutures.size(), getIdentifier(), getTransactionChainId());
+        }
+
+        // Combine the ready Futures into 1.
+        Future<Iterable<ActorSelection>> combinedFutures = akka.dispatch.Futures.sequence(
+                previousReadyFutures, getActorContext().getClientDispatcher());
+
+        // Add a callback for completion of the combined Futures.
+        final Promise<ActorSelection> returnPromise = akka.dispatch.Futures.promise();
+        OnComplete<Iterable<ActorSelection>> onComplete = new OnComplete<Iterable<ActorSelection>>() {
+            @Override
+            public void onComplete(Throwable failure, Iterable<ActorSelection> notUsed) {
+                if(failure != null) {
+                    // A Ready Future failed so fail the returned Promise.
+                    returnPromise.failure(failure);
+                } else {
+                    LOG.debug("Previous Tx readied - sending FindPrimaryShard for {} on chain {}",
+                            getIdentifier(), getTransactionChainId());
+
+                    // Send the FindPrimaryShard message and use the resulting Future to complete the
+                    // returned Promise.
+                    returnPromise.completeWith(ChainedTransactionProxy.super.sendFindPrimaryShardAsync(shardName));
+                }
+            }
+        };
+
+        combinedFutures.onComplete(onComplete, getActorContext().getClientDispatcher());
+
+        return returnPromise.future();
+    }
+}
\ No newline at end of file
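
A stand-alone sketch of the sequencing pattern sendFindPrimaryShardAsync() relies on, using the same Akka Futures/Promise API; the class, method and value names in the sketch are illustrative, not part of the patch.

import akka.actor.ActorSystem;
import akka.dispatch.Futures;
import akka.dispatch.OnComplete;
import java.util.Arrays;
import java.util.concurrent.Callable;
import scala.concurrent.ExecutionContext;
import scala.concurrent.Future;
import scala.concurrent.Promise;

public final class ChainingSketch {
    // Run nextStep only after every previous future has completed successfully.
    static <T> Future<T> afterAll(Iterable<Future<Object>> previous, final Callable<Future<T>> nextStep,
            final ExecutionContext dispatcher) {
        final Promise<T> result = Futures.promise();
        Futures.sequence(previous, dispatcher).onComplete(new OnComplete<Iterable<Object>>() {
            @Override
            public void onComplete(Throwable failure, Iterable<Object> ignored) throws Exception {
                if (failure != null) {
                    result.failure(failure);              // a previous ready future failed
                } else {
                    result.completeWith(nextStep.call()); // only now start the next step
                }
            }
        }, dispatcher);
        return result.future();
    }

    public static void main(String[] args) {
        final ActorSystem system = ActorSystem.create("chaining-sketch");
        final ExecutionContext ec = system.dispatcher();

        Future<String> reply = afterAll(
                Arrays.asList(Futures.<Object>successful("ready-1"), Futures.<Object>successful("ready-2")),
                new Callable<Future<String>>() {
                    @Override
                    public Future<String> call() {
                        return Futures.successful("find-primary-shard-reply");
                    }
                }, ec);

        reply.onComplete(new OnComplete<String>() {
            @Override
            public void onComplete(Throwable failure, String value) {
                System.out.println(failure == null ? value : failure.toString());
                system.shutdown();
            }
        }, ec);
    }
}
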
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerSupport.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataChangeListenerSupport.java
new file mode 100644 (file)
index 0000000..939ddf8
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import java.util.ArrayList;
+import java.util.List;
+import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class DataChangeListenerSupport extends LeaderLocalDelegateFactory<RegisterChangeListener, ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>>> {
+    private static final Logger LOG = LoggerFactory.getLogger(DataChangeListenerSupport.class);
+    private final List<DelayedListenerRegistration> delayedListenerRegistrations = new ArrayList<>();
+    private final List<ActorSelection> dataChangeListeners =  new ArrayList<>();
+
+    DataChangeListenerSupport(final Shard shard) {
+        super(shard);
+    }
+
+    @Override
+    void onLeadershipChange(final boolean isLeader) {
+        for (ActorSelection dataChangeListener : dataChangeListeners) {
+            dataChangeListener.tell(new EnableNotification(isLeader), getSelf());
+        }
+
+        if (isLeader) {
+            for (DelayedListenerRegistration reg: delayedListenerRegistrations) {
+                if(!reg.isClosed()) {
+                    reg.setDelegate(createDelegate(reg.getRegisterChangeListener()));
+                }
+            }
+
+            delayedListenerRegistrations.clear();
+        }
+    }
+
+    @Override
+    void onMessage(final RegisterChangeListener message, final boolean isLeader) {
+
+        LOG.debug("{}: registerDataChangeListener for {}, leader: {}", persistenceId(), message.getPath(), isLeader);
+
+        ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+                                                     NormalizedNode<?, ?>>> registration;
+        if (isLeader) {
+            registration = createDelegate(message);
+        } else {
+            LOG.debug("{}: Shard is not the leader - delaying registration", persistenceId());
+
+            DelayedListenerRegistration delayedReg = new DelayedListenerRegistration(message);
+            delayedListenerRegistrations.add(delayedReg);
+            registration = delayedReg;
+        }
+
+        ActorRef listenerRegistration = createActor(DataChangeListenerRegistration.props(registration));
+
+        LOG.debug("{}: registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
+                persistenceId(), listenerRegistration.path());
+
+        tellSender(new RegisterChangeListenerReply(listenerRegistration));
+    }
+
+    @Override
+    ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> createDelegate(
+            final RegisterChangeListener message) {
+        ActorSelection dataChangeListenerPath = selectActor(message.getDataChangeListenerPath());
+
+        // Notify the listener whether notifications should be enabled or not;
+        // since this delegate is only created on the leader, notifications
+        // are enabled here.
+        dataChangeListenerPath.tell(new EnableNotification(true), getSelf());
+
+        // Now store a reference to the data change listener so it can be notified
+        // at a later point if notifications should be enabled or disabled
+        dataChangeListeners.add(dataChangeListenerPath);
+
+        AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener =
+                new DataChangeListenerProxy(dataChangeListenerPath);
+
+        LOG.debug("{}: Registering for path {}", persistenceId(), message.getPath());
+
+        return getShard().getDataStore().registerChangeListener(message.getPath(), listener,
+                message.getScope());
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerActor.java
new file mode 100644 (file)
index 0000000..3f11909
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.Props;
+import akka.japi.Creator;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
+import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Proxy actor which acts as a facade to the user-provided listener. Responsible for unwrapping
+ * DataTreeChanged messages and dispatching their contents to the user.
+ */
+final class DataTreeChangeListenerActor extends AbstractUntypedActor {
+    private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerActor.class);
+    private final DOMDataTreeChangeListener listener;
+    private boolean notificationsEnabled = false;
+
+    private DataTreeChangeListenerActor(final DOMDataTreeChangeListener listener) {
+        this.listener = Preconditions.checkNotNull(listener);
+    }
+
+    @Override
+    protected void handleReceive(final Object message) {
+        if (message instanceof DataTreeChanged) {
+            dataChanged((DataTreeChanged)message);
+        } else if (message instanceof EnableNotification) {
+            enableNotification((EnableNotification) message);
+        }
+    }
+
+    private void dataChanged(final DataTreeChanged message) {
+        // Do nothing if notifications are not enabled
+        if (!notificationsEnabled) {
+            LOG.debug("Notifications not enabled for listener {} - dropping change notification", listener);
+            return;
+        }
+
+        LOG.debug("Sending change notification {} to listener {}", message.getChanges(), listener);
+
+        try {
+            this.listener.onDataTreeChanged(message.getChanges());
+        } catch (Exception e) {
+            LOG.error("Error notifying listener {}", this.listener, e);
+        }
+
+        // TODO: do we really need this?
+        // It seems the sender is never null but it doesn't hurt to check. If the caller passes in
+        // a null sender (ActorRef.noSender()), akka translates that to the deadLetters actor.
+        if (getSender() != null && !getContext().system().deadLetters().equals(getSender())) {
+            getSender().tell(DataTreeChangedReply.getInstance(), getSelf());
+        }
+    }
+
+    private void enableNotification(final EnableNotification message) {
+        notificationsEnabled = message.isEnabled();
+        LOG.debug("{} notifications for listener {}", (notificationsEnabled ? "Enabled" : "Disabled"),
+                listener);
+    }
+
+    public static Props props(final DOMDataTreeChangeListener listener) {
+        return Props.create(new DataTreeChangeListenerCreator(listener));
+    }
+
+    private static final class DataTreeChangeListenerCreator implements Creator<DataTreeChangeListenerActor> {
+        private static final long serialVersionUID = 1L;
+        private final DOMDataTreeChangeListener listener;
+
+        DataTreeChangeListenerCreator(final DOMDataTreeChangeListener listener) {
+            this.listener = Preconditions.checkNotNull(listener);
+        }
+
+        @Override
+        public DataTreeChangeListenerActor create() {
+            return new DataTreeChangeListenerActor(listener);
+        }
+    }
+}
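
A same-package usage sketch of the message protocol handled above (the actor class is package-private); the listener body and the source of the change collection are illustrative.

package org.opendaylight.controller.cluster.datastore;

import akka.actor.ActorRef;
import akka.actor.ActorSystem;
import java.util.Collection;
import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;

final class DataTreeChangeListenerActorSketch {
    static void run(ActorSystem system, Collection<DataTreeCandidate> changes) {
        DOMDataTreeChangeListener listener = new DOMDataTreeChangeListener() {
            @Override
            public void onDataTreeChanged(Collection<DataTreeCandidate> received) {
                System.out.println("received " + received.size() + " candidates");
            }
        };

        ActorRef actor = system.actorOf(DataTreeChangeListenerActor.props(listener));

        actor.tell(new DataTreeChanged(changes), ActorRef.noSender()); // dropped: notifications not yet enabled
        actor.tell(new EnableNotification(true), ActorRef.noSender()); // the shard leader enables delivery
        actor.tell(new DataTreeChanged(changes), ActorRef.noSender()); // now dispatched to the listener
    }
}
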
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxy.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerProxy.java
new file mode 100644 (file)
index 0000000..124724b
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.PoisonPill;
+import akka.dispatch.OnComplete;
+import com.google.common.base.Preconditions;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.cluster.datastore.exceptions.LocalShardNotFoundException;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.AbstractListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import scala.concurrent.Future;
+
+/**
+ * Proxy class for holding required state to lazily instantiate a listener registration with an
+ * asynchronously-discovered actor.
+ *
+ * @param <T> listener type
+ */
+final class DataTreeChangeListenerProxy<T extends DOMDataTreeChangeListener> extends AbstractListenerRegistration<T> {
+    private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerProxy.class);
+    private final ActorRef dataChangeListenerActor;
+    private final ActorContext actorContext;
+
+    @GuardedBy("this")
+    private ActorSelection listenerRegistrationActor;
+
+    public DataTreeChangeListenerProxy(final ActorContext actorContext, final T listener) {
+        super(listener);
+        this.actorContext = Preconditions.checkNotNull(actorContext);
+        this.dataChangeListenerActor = actorContext.getActorSystem().actorOf(
+            DataTreeChangeListenerActor.props(getInstance()).withDispatcher(actorContext.getNotificationDispatcherPath()));
+    }
+
+    @Override
+    protected synchronized void removeRegistration() {
+        if (listenerRegistrationActor != null) {
+            listenerRegistrationActor.tell(CloseDataTreeChangeListenerRegistration.getInstance(), ActorRef.noSender());
+            listenerRegistrationActor = null;
+        }
+
+        dataChangeListenerActor.tell(PoisonPill.getInstance(), ActorRef.noSender());
+    }
+
+    void init(final String shardName, final YangInstanceIdentifier treeId) {
+        Future<ActorRef> findFuture = actorContext.findLocalShardAsync(shardName);
+        findFuture.onComplete(new OnComplete<ActorRef>() {
+            @Override
+            public void onComplete(final Throwable failure, final ActorRef shard) {
+                if (failure instanceof LocalShardNotFoundException) {
+                    LOG.debug("No local shard found for {} - DataTreeChangeListener {} at path {} " +
+                            "cannot be registered", shardName, getInstance(), treeId);
+                } else if (failure != null) {
+                    LOG.error("Failed to find local shard {} - DataTreeChangeListener {} at path {} " +
+                            "cannot be registered: {}", shardName, getInstance(), treeId, failure);
+                } else {
+                    doRegistration(shard, treeId);
+                }
+            }
+        }, actorContext.getClientDispatcher());
+    }
+
+    private void setListenerRegistrationActor(final ActorSelection actor) {
+        if (actor == null) {
+            LOG.debug("Ignoring null actor on {}", this);
+            return;
+        }
+
+        synchronized (this) {
+            if (!isClosed()) {
+                this.listenerRegistrationActor = actor;
+                return;
+            }
+        }
+
+        // This registration has already been closed, notify the actor
+        actor.tell(CloseDataTreeChangeListenerRegistration.getInstance(), null);
+    }
+
+    private void doRegistration(final ActorRef shard, final YangInstanceIdentifier path) {
+
+        Future<Object> future = actorContext.executeOperationAsync(shard,
+                new RegisterDataTreeChangeListener(path, dataChangeListenerActor),
+                actorContext.getDatastoreContext().getShardInitializationTimeout());
+
+        future.onComplete(new OnComplete<Object>(){
+            @Override
+            public void onComplete(final Throwable failure, final Object result) {
+                if (failure != null) {
+                    LOG.error("Failed to register DataTreeChangeListener {} at path {}",
+                            getInstance(), path.toString(), failure);
+                } else {
+                    RegisterDataTreeChangeListenerReply reply = (RegisterDataTreeChangeListenerReply) result;
+                    setListenerRegistrationActor(actorContext.actorSelection(
+                            reply.getListenerRegistrationPath().path()));
+                }
+            }
+        }, actorContext.getClientDispatcher());
+    }
+}
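
The proxy is wired up by DistributedDataStore further below; a condensed, same-package sketch of its lifecycle, with the shard name and surrounding variables assumed purely for illustration:

// actorContext, listener and treePath are assumed to be provided by the caller.
static void registerSketch(ActorContext actorContext, DOMDataTreeChangeListener listener,
        YangInstanceIdentifier treePath) {
    DataTreeChangeListenerProxy<DOMDataTreeChangeListener> proxy =
            new DataTreeChangeListenerProxy<>(actorContext, listener);
    proxy.init("default", treePath); // async: find the local shard, then send RegisterDataTreeChangeListener
    // ... later, when the registration is no longer needed:
    proxy.close();                   // closes the remote registration and poisons the listener actor
}
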
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerRegistrationActor.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerRegistrationActor.java
new file mode 100644 (file)
index 0000000..7d0117f
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.PoisonPill;
+import akka.actor.Props;
+import akka.japi.Creator;
+import com.google.common.base.Preconditions;
+import org.opendaylight.controller.cluster.common.actor.AbstractUntypedActor;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;
+import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistrationReply;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * Actor co-located with a shard. It exists only to terminate the registration when
+ * asked to do so via {@link CloseDataTreeChangeListenerRegistration}.
+ */
+public final class DataTreeChangeListenerRegistrationActor extends AbstractUntypedActor {
+    private final ListenerRegistration<DOMDataTreeChangeListener> registration;
+
+    public DataTreeChangeListenerRegistrationActor(final ListenerRegistration<DOMDataTreeChangeListener> registration) {
+        this.registration = Preconditions.checkNotNull(registration);
+    }
+
+    @Override
+    protected void handleReceive(Object message) throws Exception {
+        if (message instanceof CloseDataTreeChangeListenerRegistration) {
+            registration.close();
+            getSender().tell(CloseDataTreeChangeListenerRegistrationReply.getInstance(), getSelf());
+            getSelf().tell(PoisonPill.getInstance(), getSelf());
+        }
+    }
+
+    public static Props props(final ListenerRegistration<DOMDataTreeChangeListener> registration) {
+        return Props.create(new DataTreeChangeListenerRegistrationCreator(registration));
+    }
+
+    private static final class DataTreeChangeListenerRegistrationCreator implements Creator<DataTreeChangeListenerRegistrationActor> {
+        private static final long serialVersionUID = 1L;
+        final ListenerRegistration<DOMDataTreeChangeListener> registration;
+
+        DataTreeChangeListenerRegistrationCreator(ListenerRegistration<DOMDataTreeChangeListener> registration) {
+            this.registration = Preconditions.checkNotNull(registration);
+        }
+
+        @Override
+        public DataTreeChangeListenerRegistrationActor create() {
+            return new DataTreeChangeListenerRegistrationActor(registration);
+        }
+    }
+}
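
A brief sketch of how the registration is torn down remotely via the ask pattern; the timeout value and helper method name are illustrative.

import akka.actor.ActorRef;
import akka.pattern.Patterns;
import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;
import scala.concurrent.Future;

// The co-located actor closes the registration, replies with
// CloseDataTreeChangeListenerRegistrationReply and then stops itself.
static Future<Object> closeRegistration(ActorRef registrationActor) {
    return Patterns.ask(registrationActor, CloseDataTreeChangeListenerRegistration.getInstance(), 5000);
}
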
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerSupport.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DataTreeChangeListenerSupport.java
new file mode 100644 (file)
index 0000000..3987c9a
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import java.util.ArrayList;
+import java.util.Collection;
+import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListenerReply;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+final class DataTreeChangeListenerSupport extends LeaderLocalDelegateFactory<RegisterDataTreeChangeListener, ListenerRegistration<DOMDataTreeChangeListener>> {
+    private static final Logger LOG = LoggerFactory.getLogger(DataTreeChangeListenerSupport.class);
+    private final ArrayList<DelayedDataTreeListenerRegistration> delayedRegistrations = new ArrayList<>();
+    private final Collection<ActorSelection> actors = new ArrayList<>();
+
+    DataTreeChangeListenerSupport(final Shard shard) {
+        super(shard);
+    }
+
+    @Override
+    void onLeadershipChange(final boolean isLeader) {
+        if (isLeader) {
+            for (DelayedDataTreeListenerRegistration reg : delayedRegistrations) {
+                reg.createDelegate(this);
+            }
+            delayedRegistrations.clear();
+            delayedRegistrations.trimToSize();
+        }
+
+        final EnableNotification msg = new EnableNotification(isLeader);
+        for (ActorSelection dataChangeListener : actors) {
+            dataChangeListener.tell(msg, getSelf());
+        }
+    }
+
+    @Override
+    void onMessage(final RegisterDataTreeChangeListener registerTreeChangeListener, final boolean isLeader) {
+        LOG.debug("{}: registerTreeChangeListener for {}, leader: {}", persistenceId(), registerTreeChangeListener.getPath(), isLeader);
+
+        final ListenerRegistration<DOMDataTreeChangeListener> registration;
+        if (!isLeader) {
+            LOG.debug("{}: Shard is not the leader - delaying registration", persistenceId());
+
+            DelayedDataTreeListenerRegistration delayedReg =
+                    new DelayedDataTreeListenerRegistration(registerTreeChangeListener);
+            delayedRegistrations.add(delayedReg);
+            registration = delayedReg;
+        } else {
+            registration = createDelegate(registerTreeChangeListener);
+        }
+
+        ActorRef listenerRegistration = createActor(DataTreeChangeListenerRegistrationActor.props(registration));
+
+        LOG.debug("{}: registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
+            persistenceId(), listenerRegistration.path());
+
+        tellSender(new RegisterDataTreeChangeListenerReply(listenerRegistration));
+    }
+
+    @Override
+    ListenerRegistration<DOMDataTreeChangeListener> createDelegate(final RegisterDataTreeChangeListener message) {
+        ActorSelection dataChangeListenerPath = selectActor(message.getDataTreeChangeListenerPath());
+
+        // Notify the listener whether notifications should be enabled or not;
+        // since this delegate is only created on the leader, notifications
+        // are enabled here.
+        dataChangeListenerPath.tell(new EnableNotification(true), getSelf());
+
+        // Now store a reference to the data change listener so it can be notified
+        // at a later point if notifications should be enabled or disabled
+        actors.add(dataChangeListenerPath);
+
+        DOMDataTreeChangeListener listener = new ForwardingDataTreeChangeListener(dataChangeListenerPath);
+
+        LOG.debug("{}: Registering for path {}", persistenceId(), message.getPath());
+
+        return getShard().getDataStore().registerTreeChangeListener(message.getPath(), listener);
+    }
+}
index d5142c94a68b53311293de80cf5fc73d9415ed9b..c3cc4998656e82b37354500a1b629534c5622abe 100644 (file)
@@ -9,6 +9,8 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.util.Timeout;
+import com.google.common.collect.Sets;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import org.apache.commons.lang3.text.WordUtils;
 import org.opendaylight.controller.cluster.datastore.config.ConfigurationReader;
@@ -25,6 +27,7 @@ import scala.concurrent.duration.FiniteDuration;
  * @author Thomas Pantelis
  */
 public class DatastoreContext {
+    public static final String METRICS_DOMAIN = "org.opendaylight.controller.cluster.datastore";
 
     public static final Duration DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT = Duration.create(10, TimeUnit.MINUTES);
     public static final int DEFAULT_OPERATION_TIMEOUT_IN_SECONDS = 5;
@@ -44,6 +47,8 @@ public class DatastoreContext {
     public static final String UNKNOWN_DATA_STORE_TYPE = "unknown";
     public static final int DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT= 100;
 
+    private static Set<String> globalDatastoreTypes = Sets.newConcurrentHashSet();
+
     private InMemoryDOMDataStoreConfigProperties dataStoreProperties;
     private Duration shardTransactionIdleTimeout = DatastoreContext.DEFAULT_SHARD_TRANSACTION_IDLE_TIMEOUT;
     private int operationTimeoutInSeconds = DEFAULT_OPERATION_TIMEOUT_IN_SECONDS;
@@ -60,6 +65,10 @@ public class DatastoreContext {
     private int shardBatchedModificationCount = DEFAULT_SHARD_BATCHED_MODIFICATION_COUNT;
     private boolean writeOnlyTransactionOptimizationsEnabled = false;
 
+    public static Set<String> getGlobalDatastoreTypes() {
+        return globalDatastoreTypes;
+    }
+
     private DatastoreContext() {
         setShardJournalRecoveryLogBatchSize(DEFAULT_JOURNAL_RECOVERY_BATCH_SIZE);
         setSnapshotBatchCount(DEFAULT_SNAPSHOT_BATCH_COUNT);
@@ -361,6 +370,11 @@ public class DatastoreContext {
             datastoreContext.dataStoreProperties = InMemoryDOMDataStoreConfigProperties.create(
                     maxShardDataChangeExecutorPoolSize, maxShardDataChangeExecutorQueueSize,
                     maxShardDataChangeListenerQueueSize, maxShardDataStoreExecutorQueueSize);
+
+            if(datastoreContext.dataStoreType != null) {
+                globalDatastoreTypes.add(datastoreContext.dataStoreType);
+            }
+
             return datastoreContext;
         }
     }
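
A sketch of how the new global type registry gets populated; it assumes the class's existing newBuilder()/dataStoreType() builder methods, and "operational" is an illustrative type name.

// Building a context records its type in the process-wide set, which the new
// DatastoreInfoMXBean machinery can use to enumerate datastores.
DatastoreContext context = DatastoreContext.newBuilder()
        .dataStoreType("operational")
        .build();

Set<String> types = DatastoreContext.getGlobalDatastoreTypes(); // now contains "operational"
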
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedDataTreeListenerRegistration.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedDataTreeListenerRegistration.java
new file mode 100644 (file)
index 0000000..b3ae8a3
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import com.google.common.base.Preconditions;
+import javax.annotation.concurrent.GuardedBy;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+
+/**
+ * Intermediate proxy registration returned to the user when we cannot
+ * instantiate the registration immediately. It provides a bridge to
+ * a real registration which may materialize at some point in the future.
+ */
+final class DelayedDataTreeListenerRegistration implements ListenerRegistration<DOMDataTreeChangeListener> {
+    private final RegisterDataTreeChangeListener registerTreeChangeListener;
+    private volatile ListenerRegistration<DOMDataTreeChangeListener> delegate;
+    @GuardedBy("this")
+    private boolean closed;
+
+    DelayedDataTreeListenerRegistration(final RegisterDataTreeChangeListener registerTreeChangeListener) {
+        this.registerTreeChangeListener = Preconditions.checkNotNull(registerTreeChangeListener);
+    }
+
+    synchronized void createDelegate(final DelegateFactory<RegisterDataTreeChangeListener, ListenerRegistration<DOMDataTreeChangeListener>> factory) {
+        if (!closed) {
+            this.delegate = factory.createDelegate(registerTreeChangeListener);
+        }
+    }
+
+    @Override
+    public DOMDataTreeChangeListener getInstance() {
+        final ListenerRegistration<DOMDataTreeChangeListener> d = delegate;
+        return d == null ? null : d.getInstance();
+    }
+
+    @Override
+    public synchronized void close() {
+        if (!closed) {
+            closed = true;
+            if (delegate != null) {
+                delegate.close();
+            }
+        }
+    }
+}
+
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedListenerRegistration.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelayedListenerRegistration.java
new file mode 100644 (file)
index 0000000..8eb595d
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
+import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+
+final class DelayedListenerRegistration implements
+    ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> {
+
+    private volatile boolean closed;
+
+    private final RegisterChangeListener registerChangeListener;
+
+    private volatile ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+                                                         NormalizedNode<?, ?>>> delegate;
+
+    DelayedListenerRegistration(final RegisterChangeListener registerChangeListener) {
+        this.registerChangeListener = registerChangeListener;
+    }
+
+    void setDelegate( final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
+                                        NormalizedNode<?, ?>>> registration) {
+        this.delegate = registration;
+    }
+
+    boolean isClosed() {
+        return closed;
+    }
+
+    RegisterChangeListener getRegisterChangeListener() {
+        return registerChangeListener;
+    }
+
+    @Override
+    public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
+        return delegate != null ? delegate.getInstance() : null;
+    }
+
+    @Override
+    public void close() {
+        closed = true;
+        if(delegate != null) {
+            delegate.close();
+        }
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelegateFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/DelegateFactory.java
new file mode 100644 (file)
index 0000000..e6702d9
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+/**
+ * Base class for factories instantiating delegates.
+ *
+ * @param <M> message type
+ * @param <D> delegate type
+ */
+abstract class DelegateFactory<M, D> {
+    abstract D createDelegate(M message);
+}
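
A toy concrete factory, purely to illustrate the contract (same package assumed; the message and delegate types are arbitrary placeholders):

// Maps a String message to a Runnable delegate.
final class EchoDelegateFactory extends DelegateFactory<String, Runnable> {
    @Override
    Runnable createDelegate(final String message) {
        return new Runnable() {
            @Override
            public void run() {
                System.out.println("delegate for " + message);
            }
        };
    }
}
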
index 5ade98cb86106a7b65d82251dccced95c93f0914..69c127f2897017f218222b2a95b270b1bcc9f0de 100644 (file)
@@ -15,15 +15,18 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardManagerIdentifier;
 import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreConfigurationMXBeanImpl;
+import org.opendaylight.controller.cluster.datastore.jmx.mbeans.DatastoreInfoMXBeanImpl;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 import org.opendaylight.controller.cluster.datastore.utils.Dispatchers;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataBroker;
 import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.controller.sal.core.spi.data.DOMStore;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
+import org.opendaylight.controller.sal.core.spi.data.DOMStoreTreeChangePublisher;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -37,7 +40,7 @@ import org.slf4j.LoggerFactory;
  *
  */
 public class DistributedDataStore implements DOMStore, SchemaContextListener,
-        DatastoreContextConfigAdminOverlay.Listener, AutoCloseable {
+        DatastoreContextConfigAdminOverlay.Listener, DOMStoreTreeChangePublisher, AutoCloseable {
 
     private static final Logger LOG = LoggerFactory.getLogger(DistributedDataStore.class);
     private static final String UNKNOWN_TYPE = "unknown";
@@ -52,7 +55,9 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
 
     private DatastoreConfigurationMXBeanImpl datastoreConfigMXBean;
 
-    private CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
+    private DatastoreInfoMXBeanImpl datastoreInfoMXBean;
+
+    private final CountDownLatch waitTillReadyCountDownLatch = new CountDownLatch(1);
 
     private final String type;
 
@@ -84,6 +89,9 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
         datastoreConfigMXBean = new DatastoreConfigurationMXBeanImpl(datastoreContext.getDataStoreMXBeanType());
         datastoreConfigMXBean.setContext(datastoreContext);
         datastoreConfigMXBean.registerMBean();
+
+        datastoreInfoMXBean = new DatastoreInfoMXBeanImpl(datastoreContext.getDataStoreMXBeanType(), actorContext);
+        datastoreInfoMXBean.registerMBean();
     }
 
     public DistributedDataStore(ActorContext actorContext) {
@@ -119,6 +127,21 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
         return listenerRegistrationProxy;
     }
 
+    @Override
+    public <L extends DOMDataTreeChangeListener> ListenerRegistration<L> registerTreeChangeListener(YangInstanceIdentifier treeId, L listener) {
+        Preconditions.checkNotNull(treeId, "treeId should not be null");
+        Preconditions.checkNotNull(listener, "listener should not be null");
+
+        final String shardName = ShardStrategyFactory.getStrategy(treeId).findShard(treeId);
+        LOG.debug("Registering tree listener: {} for tree: {} shard: {}", listener, treeId, shardName);
+
+        final DataTreeChangeListenerProxy<L> listenerRegistrationProxy =
+                new DataTreeChangeListenerProxy<L>(actorContext, listener);
+        listenerRegistrationProxy.init(shardName, treeId);
+
+        return listenerRegistrationProxy;
+    }
+
     @Override
     public DOMStoreTransactionChain createTransactionChain() {
         return new TransactionChainProxy(actorContext);
@@ -157,6 +180,7 @@ public class DistributedDataStore implements DOMStore, SchemaContextListener,
     @Override
     public void close() {
         datastoreConfigMXBean.unregisterMBean();
+        datastoreInfoMXBean.unregisterMBean();
 
         if(closeable != null) {
             try {
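
A usage sketch of the new DOMStoreTreeChangePublisher surface added to DistributedDataStore; registering at the root identifier is purely illustrative, and the returned registration's close() undoes it.

import java.util.Collection;
import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
import org.opendaylight.yangtools.concepts.ListenerRegistration;
import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;

// Registers a tree-change listener against a DistributedDataStore instance.
static ListenerRegistration<DOMDataTreeChangeListener> watchRoot(DistributedDataStore dataStore) {
    DOMDataTreeChangeListener listener = new DOMDataTreeChangeListener() {
        @Override
        public void onDataTreeChanged(Collection<DataTreeCandidate> changes) {
            // react to committed changes under the registered subtree
        }
    };
    return dataStore.registerTreeChangeListener(YangInstanceIdentifier.builder().build(), listener);
}
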
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ForwardingDataTreeChangeListener.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/ForwardingDataTreeChangeListener.java
new file mode 100644 (file)
index 0000000..7ca21d4
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
+import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Internal implementation of a {@link DOMDataTreeChangeListener} which
+ * encapsulates received notifications into a {@link DataTreeChanged}
+ * message and forwards them towards the client's {@link DataTreeChangeListenerActor}.
+ */
+final class ForwardingDataTreeChangeListener implements DOMDataTreeChangeListener {
+    private final ActorSelection actor;
+
+    ForwardingDataTreeChangeListener(final ActorSelection actor) {
+        this.actor = Preconditions.checkNotNull(actor, "actor should not be null");
+    }
+
+    @Override
+    public void onDataTreeChanged(Collection<DataTreeCandidate> changes) {
+        actor.tell(new DataTreeChanged(changes), ActorRef.noSender());
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LeaderLocalDelegateFactory.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/LeaderLocalDelegateFactory.java
new file mode 100644 (file)
index 0000000..d33cebb
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorPath;
+import akka.actor.ActorRef;
+import akka.actor.ActorSelection;
+import akka.actor.Props;
+import com.google.common.base.Preconditions;
+
+/**
+ * Base class for factories instantiating delegates which are local to the
+ * shard leader.
+ *
+ * @param <M> message type
+ * @param <D> delegate type
+ */
+abstract class LeaderLocalDelegateFactory<M, D> extends DelegateFactory<M, D> {
+    private final Shard shard;
+
+    protected LeaderLocalDelegateFactory(final Shard shard) {
+        this.shard = Preconditions.checkNotNull(shard);
+    }
+
+    protected final ActorRef getSelf() {
+        return shard.getSelf();
+    }
+
+    protected final Shard getShard() {
+        return shard;
+    }
+
+    protected final String persistenceId() {
+        return shard.persistenceId();
+    }
+
+    protected final void tellSender(final Object message) {
+        shard.getSender().tell(message, getSelf());
+    }
+
+    protected final ActorRef createActor(final Props props) {
+        return shard.getContext().actorOf(props);
+    }
+
+    protected final ActorSelection selectActor(ActorRef ref) {
+        return shard.getContext().system().actorSelection(ref.path());
+    }
+
+    protected final ActorSelection selectActor(ActorPath path) {
+        return shard.getContext().system().actorSelection(path);
+    }
+
+    /**
+     * Invoked whenever the local shard's leadership role changes.
+     *
+     * @param isLeader true if the shard has become leader, false if it has
+     *                 become a follower.
+     */
+    abstract void onLeadershipChange(boolean isLeader);
+    abstract void onMessage(M message, boolean isLeader);
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpDOMStoreThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/NoOpDOMStoreThreePhaseCommitCohort.java
new file mode 100644 (file)
index 0000000..376b658
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import java.util.Collections;
+import java.util.List;
+import scala.concurrent.Future;
+
+/**
+ * A {@link org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort}
+ * instance given out for empty transactions.
+ */
+final class NoOpDOMStoreThreePhaseCommitCohort extends AbstractThreePhaseCommitCohort {
+    static final NoOpDOMStoreThreePhaseCommitCohort INSTANCE = new NoOpDOMStoreThreePhaseCommitCohort();
+
+    private static final ListenableFuture<Void> IMMEDIATE_VOID_SUCCESS = Futures.immediateFuture(null);
+    private static final ListenableFuture<Boolean> IMMEDIATE_BOOLEAN_SUCCESS = Futures.immediateFuture(Boolean.TRUE);
+
+    private NoOpDOMStoreThreePhaseCommitCohort() {
+        // Hidden to prevent instantiation
+    }
+
+    @Override
+    public ListenableFuture<Boolean> canCommit() {
+        return IMMEDIATE_BOOLEAN_SUCCESS;
+    }
+
+    @Override
+    public ListenableFuture<Void> preCommit() {
+        return IMMEDIATE_VOID_SUCCESS;
+    }
+
+    @Override
+    public ListenableFuture<Void> abort() {
+        return IMMEDIATE_VOID_SUCCESS;
+    }
+
+    @Override
+    public ListenableFuture<Void> commit() {
+        return IMMEDIATE_VOID_SUCCESS;
+    }
+
+    @Override
+    List<Future<ActorSelection>> getCohortFutures() {
+        return Collections.emptyList();
+    }
+}
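
For reference, the generic way a front end drives any cohort; with the no-op cohort above every step completes immediately. A minimal sketch (the driver method name is illustrative):

import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;

// Drives the standard canCommit -> preCommit -> commit sequence, aborting on a veto.
static void runThreePhaseCommit(DOMStoreThreePhaseCommitCohort cohort) throws Exception {
    if (cohort.canCommit().get()) {
        cohort.preCommit().get();
        cohort.commit().get();
    } else {
        cohort.abort().get();
    }
}
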
index 84f07760f53f4a50b7fd3da61b2b3ddeb7f3fe31..672560bbdd5c6b01a149e60dae7ae00d3d309f2f 100644 (file)
@@ -33,44 +33,44 @@ final class NoOpTransactionContext extends AbstractTransactionContext {
 
     @Override
     public void closeTransaction() {
-        LOG.debug("NoOpTransactionContext {} closeTransaction called", identifier);
+        LOG.debug("NoOpTransactionContext {} closeTransaction called", getIdentifier());
     }
 
     @Override
     public Future<ActorSelection> readyTransaction() {
-        LOG.debug("Tx {} readyTransaction called", identifier);
+        LOG.debug("Tx {} readyTransaction called", getIdentifier());
         operationLimiter.release();
         return akka.dispatch.Futures.failed(failure);
     }
 
     @Override
     public void deleteData(YangInstanceIdentifier path) {
-        LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+        LOG.debug("Tx {} deleteData called path = {}", getIdentifier(), path);
         operationLimiter.release();
     }
 
     @Override
     public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-        LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+        LOG.debug("Tx {} mergeData called path = {}", getIdentifier(), path);
         operationLimiter.release();
     }
 
     @Override
     public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-        LOG.debug("Tx {} writeData called path = {}", identifier, path);
+        LOG.debug("Tx {} writeData called path = {}", getIdentifier(), path);
         operationLimiter.release();
     }
 
     @Override
     public void readData(final YangInstanceIdentifier path, SettableFuture<Optional<NormalizedNode<?, ?>>> proxyFuture) {
-        LOG.debug("Tx {} readData called path = {}", identifier, path);
+        LOG.debug("Tx {} readData called path = {}", getIdentifier(), path);
         operationLimiter.release();
         proxyFuture.setException(new ReadFailedException("Error reading data for path " + path, failure));
     }
 
     @Override
     public void dataExists(YangInstanceIdentifier path, SettableFuture<Boolean> proxyFuture) {
-        LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+        LOG.debug("Tx {} dataExists called path = {}", getIdentifier(), path);
         operationLimiter.release();
         proxyFuture.setException(new ReadFailedException("Error checking exists for path " + path, failure));
     }
index 8e00a1389ca6a057bef39292ffcc0b04958be794..9cd52b219a442320bd0dc8bf12cdfc6b6c4a577b 100644 (file)
@@ -18,18 +18,15 @@ import akka.serialization.Serialization;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import java.io.IOException;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import javax.annotation.Nonnull;
-import org.opendaylight.controller.cluster.DataPersistenceProvider;
 import org.opendaylight.controller.cluster.common.actor.CommonConfig;
 import org.opendaylight.controller.cluster.common.actor.MeteringBehavior;
 import org.opendaylight.controller.cluster.datastore.ShardCommitCoordinator.CohortEntry;
@@ -51,12 +48,11 @@ import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionR
 import org.opendaylight.controller.cluster.datastore.messages.CreateSnapshot;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
-import org.opendaylight.controller.cluster.datastore.messages.EnableNotification;
 import org.opendaylight.controller.cluster.datastore.messages.ForwardedReadyTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.PeerAddressResolved;
 import org.opendaylight.controller.cluster.datastore.messages.ReadyTransactionReply;
 import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListener;
-import org.opendaylight.controller.cluster.datastore.messages.RegisterChangeListenerReply;
+import org.opendaylight.controller.cluster.datastore.messages.RegisterDataTreeChangeListener;
 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
 import org.opendaylight.controller.cluster.datastore.modification.Modification;
 import org.opendaylight.controller.cluster.datastore.modification.ModificationPayload;
@@ -72,13 +68,11 @@ import org.opendaylight.controller.cluster.raft.messages.AppendEntriesReply;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationByteStringPayload;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.CompositeModificationPayload;
 import org.opendaylight.controller.cluster.raft.protobuff.client.messages.Payload;
-import org.opendaylight.controller.md.sal.common.api.data.AsyncDataChangeListener;
 import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStore;
 import org.opendaylight.controller.md.sal.dom.store.impl.InMemoryDOMDataStoreFactory;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.opendaylight.yangtools.concepts.ListenerRegistration;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
@@ -108,15 +102,8 @@ public class Shard extends RaftActor {
 
     private final ShardStats shardMBean;
 
-    private final List<ActorSelection> dataChangeListeners =  Lists.newArrayList();
-
-    private final List<DelayedListenerRegistration> delayedListenerRegistrations =
-                                                                       Lists.newArrayList();
-
     private DatastoreContext datastoreContext;
 
-    private DataPersistenceProvider dataPersistenceProvider;
-
     private SchemaContext schemaContext;
 
     private int createSnapshotTransactionCounter;
@@ -144,6 +131,9 @@ public class Shard extends RaftActor {
 
     private final String txnDispatcherPath;
 
+    private final DataTreeChangeListenerSupport treeChangeSupport = new DataTreeChangeListenerSupport(this);
+    private final DataChangeListenerSupport changeSupport = new DataChangeListenerSupport(this);
+
     protected Shard(final ShardIdentifier name, final Map<String, String> peerAddresses,
             final DatastoreContext datastoreContext, final SchemaContext schemaContext) {
         super(name.toString(), new HashMap<>(peerAddresses), Optional.of(datastoreContext.getShardRaftConfig()));
@@ -151,18 +141,17 @@ public class Shard extends RaftActor {
         this.name = name.toString();
         this.datastoreContext = datastoreContext;
         this.schemaContext = schemaContext;
-        this.dataPersistenceProvider = (datastoreContext.isPersistent())
-                ? new PersistentDataProvider() : new NonPersistentRaftDataProvider();
         this.txnDispatcherPath = new Dispatchers(context().system().dispatchers())
                 .getDispatcherPath(Dispatchers.DispatcherType.Transaction);
 
+        setPersistence(datastoreContext.isPersistent());
 
         LOG.info("Shard created : {}, persistent : {}", name, datastoreContext.isPersistent());
 
         store = InMemoryDOMDataStoreFactory.create(name.toString(), null,
                 datastoreContext.getDataStoreProperties());
 
-        if(schemaContext != null) {
+        if (schemaContext != null) {
             store.onGlobalContextUpdated(schemaContext);
         }
 
@@ -275,7 +264,9 @@ public class Shard extends RaftActor {
             } else if (CloseTransactionChain.SERIALIZABLE_CLASS.isInstance(message)) {
                 closeTransactionChain(CloseTransactionChain.fromSerializable(message));
             } else if (message instanceof RegisterChangeListener) {
-                registerChangeListener((RegisterChangeListener) message);
+                changeSupport.onMessage((RegisterChangeListener) message, isLeader());
+            } else if (message instanceof RegisterDataTreeChangeListener) {
+                treeChangeSupport.onMessage((RegisterDataTreeChangeListener) message, isLeader());
             } else if (message instanceof UpdateSchemaContext) {
                 updateSchemaContext((UpdateSchemaContext) message);
             } else if (message instanceof PeerAddressResolved) {
@@ -311,12 +302,10 @@ public class Shard extends RaftActor {
 
         setTransactionCommitTimeout();
 
-        if(datastoreContext.isPersistent() &&
-                dataPersistenceProvider instanceof NonPersistentRaftDataProvider) {
-            dataPersistenceProvider = new PersistentDataProvider();
-        } else if(!datastoreContext.isPersistent() &&
-                dataPersistenceProvider instanceof PersistentDataProvider) {
-            dataPersistenceProvider = new NonPersistentRaftDataProvider();
+        if(datastoreContext.isPersistent() && !persistence().isRecoveryApplicable()) {
+            setPersistence(true);
+        } else if(!datastoreContext.isPersistent() && persistence().isRecoveryApplicable()) {
+            setPersistence(false);
         }
 
         updateConfigParams(datastoreContext.getShardRaftConfig());
@@ -654,58 +643,7 @@ public class Shard extends RaftActor {
         store.onGlobalContextUpdated(schemaContext);
     }
 
-    private void registerChangeListener(final RegisterChangeListener registerChangeListener) {
-
-        LOG.debug("{}: registerDataChangeListener for {}", persistenceId(), registerChangeListener.getPath());
-
-        ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
-                                                     NormalizedNode<?, ?>>> registration;
-        if(isLeader()) {
-            registration = doChangeListenerRegistration(registerChangeListener);
-        } else {
-            LOG.debug("{}: Shard is not the leader - delaying registration", persistenceId());
-
-            DelayedListenerRegistration delayedReg =
-                    new DelayedListenerRegistration(registerChangeListener);
-            delayedListenerRegistrations.add(delayedReg);
-            registration = delayedReg;
-        }
-
-        ActorRef listenerRegistration = getContext().actorOf(
-                DataChangeListenerRegistration.props(registration));
-
-        LOG.debug("{}: registerDataChangeListener sending reply, listenerRegistrationPath = {} ",
-                persistenceId(), listenerRegistration.path());
-
-        getSender().tell(new RegisterChangeListenerReply(listenerRegistration), getSelf());
-    }
-
-    private ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
-                                               NormalizedNode<?, ?>>> doChangeListenerRegistration(
-            final RegisterChangeListener registerChangeListener) {
-
-        ActorSelection dataChangeListenerPath = getContext().system().actorSelection(
-                registerChangeListener.getDataChangeListenerPath());
-
-        // Notify the listener if notifications should be enabled or not
-        // If this shard is the leader then it will enable notifications else
-        // it will not
-        dataChangeListenerPath.tell(new EnableNotification(true), getSelf());
-
-        // Now store a reference to the data change listener so it can be notified
-        // at a later point if notifications should be enabled or disabled
-        dataChangeListeners.add(dataChangeListenerPath);
-
-        AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> listener =
-                new DataChangeListenerProxy(dataChangeListenerPath);
-
-        LOG.debug("{}: Registering for path {}", persistenceId(), registerChangeListener.getPath());
-
-        return store.registerChangeListener(registerChangeListener.getPath(), listener,
-                registerChangeListener.getScope());
-    }
-
-    private boolean isMetricsCaptureEnabled(){
+    private boolean isMetricsCaptureEnabled() {
         CommonConfig config = new CommonConfig(getContext().system().settings().config());
         return config.isMetricCaptureEnabled();
     }
@@ -832,22 +770,11 @@ public class Shard extends RaftActor {
     @Override
     protected void onStateChanged() {
         boolean isLeader = isLeader();
-        for (ActorSelection dataChangeListener : dataChangeListeners) {
-            dataChangeListener.tell(new EnableNotification(isLeader), getSelf());
-        }
-
-        if(isLeader) {
-            for(DelayedListenerRegistration reg: delayedListenerRegistrations) {
-                if(!reg.isClosed()) {
-                    reg.setDelegate(doChangeListenerRegistration(reg.getRegisterChangeListener()));
-                }
-            }
-
-            delayedListenerRegistrations.clear();
-        }
+        changeSupport.onLeadershipChange(isLeader);
+        treeChangeSupport.onLeadershipChange(isLeader);
 
         // If this actor is no longer the leader close all the transaction chains
-        if(!isLeader) {
+        if (!isLeader) {
             if(LOG.isDebugEnabled()) {
                 LOG.debug(
                     "{}: onStateChanged: Closing all transaction chains because shard {} is no longer the leader",
@@ -859,19 +786,10 @@ public class Shard extends RaftActor {
     }
 
     @Override
-    protected DataPersistenceProvider persistence() {
-        return dataPersistenceProvider;
-    }
-
-    @Override public String persistenceId() {
+    public String persistenceId() {
         return this.name;
     }
 
-    @VisibleForTesting
-    DataPersistenceProvider getDataPersistenceProvider() {
-        return dataPersistenceProvider;
-    }
-
     @VisibleForTesting
     ShardCommitCoordinator getCommitCoordinator() {
         return commitCoordinator;
@@ -910,45 +828,4 @@ public class Shard extends RaftActor {
     ShardStats getShardMBean() {
         return shardMBean;
     }
-
-    private static class DelayedListenerRegistration implements
-        ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>>> {
-
-        private volatile boolean closed;
-
-        private final RegisterChangeListener registerChangeListener;
-
-        private volatile ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
-                                                             NormalizedNode<?, ?>>> delegate;
-
-        DelayedListenerRegistration(final RegisterChangeListener registerChangeListener) {
-            this.registerChangeListener = registerChangeListener;
-        }
-
-        void setDelegate( final ListenerRegistration<AsyncDataChangeListener<YangInstanceIdentifier,
-                                            NormalizedNode<?, ?>>> registration) {
-            this.delegate = registration;
-        }
-
-        boolean isClosed() {
-            return closed;
-        }
-
-        RegisterChangeListener getRegisterChangeListener() {
-            return registerChangeListener;
-        }
-
-        @Override
-        public AsyncDataChangeListener<YangInstanceIdentifier, NormalizedNode<?, ?>> getInstance() {
-            return delegate != null ? delegate.getInstance() : null;
-        }
-
-        @Override
-        public void close() {
-            closed = true;
-            if(delegate != null) {
-                delegate.close();
-            }
-        }
-    }
 }
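
Note on the Shard refactor above: the inline DelayedListenerRegistration bookkeeping is replaced by two helper objects (DataChangeListenerSupport, DataTreeChangeListenerSupport) that receive onMessage(..., isLeader()) and onLeadershipChange(isLeader). The following is only a minimal, generic sketch of that "delay registration until leader" pattern; the class and interface names (DelayedRegistrationSupport, RegistrationFactory) are made up and do not reproduce the actual support classes.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.atomic.AtomicReference;

    /** Hypothetical sketch: queue listener registrations until this node becomes leader. */
    final class DelayedRegistrationSupport<L> {

        /** Registration handle whose real delegate may be supplied later. */
        static final class DelayedRegistration<L> implements AutoCloseable {
            private final AtomicReference<AutoCloseable> delegate = new AtomicReference<>();
            private volatile boolean closed;
            final L listener;

            DelayedRegistration(final L listener) {
                this.listener = listener;
            }

            void setDelegate(final AutoCloseable registration) {
                delegate.set(registration);
                if (closed) {
                    closeDelegate();
                }
            }

            boolean isClosed() {
                return closed;
            }

            @Override
            public void close() {
                closed = true;
                closeDelegate();
            }

            private void closeDelegate() {
                final AutoCloseable reg = delegate.getAndSet(null);
                if (reg != null) {
                    try {
                        reg.close();
                    } catch (Exception e) {
                        // a real implementation would log this
                    }
                }
            }
        }

        interface RegistrationFactory<L> {
            AutoCloseable register(L listener);
        }

        private final List<DelayedRegistration<L>> delayed = new ArrayList<>();
        private final RegistrationFactory<L> factory;

        DelayedRegistrationSupport(final RegistrationFactory<L> factory) {
            this.factory = factory;
        }

        /** Mirrors onMessage(msg, isLeader): register now if leader, otherwise park the request. */
        DelayedRegistration<L> onMessage(final L listener, final boolean isLeader) {
            final DelayedRegistration<L> reg = new DelayedRegistration<>(listener);
            if (isLeader) {
                reg.setDelegate(factory.register(listener));
            } else {
                delayed.add(reg);
            }
            return reg;
        }

        /** Mirrors onLeadershipChange(isLeader): replay parked registrations on becoming leader. */
        void onLeadershipChange(final boolean isLeader) {
            if (!isLeader) {
                return;
            }
            for (DelayedRegistration<L> reg : delayed) {
                if (!reg.isClosed()) {
                    reg.setDelegate(factory.register(reg.listener));
                }
            }
            delayed.clear();
        }
    }

Keeping two such support instances per Shard lets the actor's receive method and onStateChanged() stay as thin as the hunks above show.
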
index 52762b4eb352ff9de295e44969b13c4410b7f2f0..cff44b13cb3edb1756ed6159e95d8fe259f91fde 100644 (file)
@@ -41,6 +41,8 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.NonPersistentDataProvider;
+import org.opendaylight.controller.cluster.PersistentDataProvider;
 import org.opendaylight.controller.cluster.common.actor.AbstractUntypedPersistentActorWithMetering;
 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
 import org.opendaylight.controller.cluster.datastore.exceptions.NotInitializedException;
@@ -136,7 +138,7 @@ public class ShardManager extends AbstractUntypedPersistentActorWithMetering {
     }
 
     protected DataPersistenceProvider createDataPersistenceProvider(boolean persistent) {
-        return (persistent) ? new PersistentDataProvider() : new NonPersistentDataProvider();
+        return (persistent) ? new PersistentDataProvider(this) : new NonPersistentDataProvider();
     }
 
     public static Props props(
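
Both this hunk and the Shard hunks above move persistence switching behind a DataPersistenceProvider: the Shard now calls setPersistence(boolean) and checks persistence().isRecoveryApplicable(), and ShardManager passes the owning actor into PersistentDataProvider. The sketch below is a simplified, hypothetical version of that provider abstraction (SimpleDataPersistenceProvider and friends are invented names; the real interface lives in org.opendaylight.controller.cluster and is not reproduced here).

    import akka.japi.Procedure;
    import akka.persistence.UntypedPersistentActor;

    /** Hypothetical, simplified persistence-provider abstraction. */
    interface SimpleDataPersistenceProvider {
        /** True when journal recovery applies, i.e. entries are really persisted. */
        boolean isRecoveryApplicable();

        <T> void persist(T entry, Procedure<T> callback);
    }

    final class SimplePersistentProvider implements SimpleDataPersistenceProvider {
        private final UntypedPersistentActor actor;

        SimplePersistentProvider(final UntypedPersistentActor actor) {
            // The provider receives the owning actor instead of being an inner class of it.
            this.actor = actor;
        }

        @Override
        public boolean isRecoveryApplicable() {
            return true;
        }

        @Override
        public <T> void persist(final T entry, final Procedure<T> callback) {
            actor.persist(entry, callback);
        }
    }

    final class SimpleNonPersistentProvider implements SimpleDataPersistenceProvider {
        @Override
        public boolean isRecoveryApplicable() {
            return false;
        }

        @Override
        public <T> void persist(final T entry, final Procedure<T> callback) {
            // Nothing is journaled; just run the callback.
            try {
                callback.apply(entry);
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }

With a setup like this, setPersistence(boolean) simply swaps providers, and onDatastoreContext only flips when the configured flag disagrees with isRecoveryApplicable(), matching the simplified condition in the Shard hunk above.
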
index aeb4062103d1c0fc77c29f0c733447728a60194f..3a2bcf2336713d2af695065792d62358de4ce9c2 100644 (file)
@@ -11,7 +11,6 @@ package org.opendaylight.controller.cluster.datastore;
 import akka.actor.ActorSelection;
 import akka.dispatch.Futures;
 import akka.dispatch.OnComplete;
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
@@ -25,7 +24,6 @@ import org.opendaylight.controller.cluster.datastore.messages.CanCommitTransacti
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CommitTransactionReply;
 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
@@ -34,7 +32,7 @@ import scala.runtime.AbstractFunction1;
 /**
  * ThreePhaseCommitCohortProxy represents a set of remote cohort proxies
  */
-public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCohort{
+public class ThreePhaseCommitCohortProxy extends AbstractThreePhaseCommitCohort {
 
     private static final Logger LOG = LoggerFactory.getLogger(ThreePhaseCommitCohortProxy.class);
 
@@ -209,7 +207,7 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
 
     @Override
     public ListenableFuture<Void> commit() {
-        OperationCallback operationCallback = (cohortFutures.size() == 0) ? NO_OP_CALLBACK :
+        OperationCallback operationCallback = cohortFutures.isEmpty() ? NO_OP_CALLBACK :
                 new TransactionRateLimitingCallback(actorContext);
 
         return voidOperation("commit", new CommitTransaction(transactionId).toSerializable(),
@@ -322,7 +320,7 @@ public class ThreePhaseCommitCohortProxy implements DOMStoreThreePhaseCommitCoho
         }, actorContext.getClientDispatcher());
     }
 
-    @VisibleForTesting
+    @Override
     List<Future<ActorSelection>> getCohortFutures() {
         return Collections.unmodifiableList(cohortFutures);
     }
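
The commit() change above selects NO_OP_CALLBACK when there are no cohort futures and a rate-limiting callback otherwise. A tiny illustration of that null-object selection follows; CommitCallback and TimingCommitCallback are made-up stand-ins for the real OperationCallback/TransactionRateLimitingCallback.

    /** Hypothetical sketch of the null-object callback selection around commit(). */
    interface CommitCallback {
        void run();
        void success();
        void failure();

        /** Shared no-op instance for the "nothing to commit" fast path. */
        CommitCallback NO_OP = new CommitCallback() {
            @Override public void run() { }
            @Override public void success() { }
            @Override public void failure() { }
        };
    }

    final class TimingCommitCallback implements CommitCallback {
        private long startedAt;
        private long lastElapsedNanos;

        @Override
        public void run() {
            startedAt = System.nanoTime();
        }

        @Override
        public void success() {
            // In the real code this latency would feed a rate limiter.
            lastElapsedNanos = System.nanoTime() - startedAt;
        }

        @Override
        public void failure() {
            // Failed commits do not adjust the limit.
        }
    }

Usage mirrors the hunk: CommitCallback cb = cohortFutures.isEmpty() ? CommitCallback.NO_OP : new TimingCommitCallback();
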
index 58ac1d8b8265bc50fb7d38dea1dd9c1b916211fc..11066edd543413de08591102ba2541d7baec9a0f 100644 (file)
@@ -9,7 +9,6 @@
 package org.opendaylight.controller.cluster.datastore;
 
 import akka.actor.ActorSelection;
-import akka.dispatch.OnComplete;
 import com.google.common.base.Preconditions;
 import java.util.Collections;
 import java.util.List;
@@ -21,18 +20,13 @@ import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransactionChain;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import scala.concurrent.Future;
-import scala.concurrent.Promise;
 
 /**
  * TransactionChainProxy acts as a proxy for a DOMStoreTransactionChain created on a remote shard
  */
 public class TransactionChainProxy implements DOMStoreTransactionChain {
 
-    private static final Logger LOG = LoggerFactory.getLogger(TransactionChainProxy.class);
-
     private interface State {
         boolean isReady();
 
@@ -139,83 +133,4 @@ public class TransactionChainProxy implements DOMStoreTransactionChain {
     private void checkReadyState(State state) {
         Preconditions.checkState(state.isReady(), "Previous transaction is not ready yet");
     }
-
-    private static class ChainedTransactionProxy extends TransactionProxy {
-
-        /**
-         * Stores the ready Futures from the previous Tx in the chain.
-         */
-        private final List<Future<ActorSelection>> previousReadyFutures;
-
-        /**
-         * Stores the ready Futures from this transaction when it is readied.
-         */
-        private volatile List<Future<ActorSelection>> readyFutures;
-
-        private ChainedTransactionProxy(ActorContext actorContext, TransactionType transactionType,
-                String transactionChainId, List<Future<ActorSelection>> previousReadyFutures) {
-            super(actorContext, transactionType, transactionChainId);
-            this.previousReadyFutures = previousReadyFutures;
-        }
-
-        List<Future<ActorSelection>> getReadyFutures() {
-            return readyFutures;
-        }
-
-        boolean isReady() {
-            return readyFutures != null;
-        }
-
-        @Override
-        protected void onTransactionReady(List<Future<ActorSelection>> readyFutures) {
-            LOG.debug("onTransactionReady {} pending readyFutures size {} chain {}", getIdentifier(),
-                    readyFutures.size(), getTransactionChainId());
-            this.readyFutures = readyFutures;
-        }
-
-        /**
-         * This method is overridden to ensure the previous Tx's ready operations complete
-         * before we initiate the next Tx in the chain to avoid creation failures if the
-         * previous Tx's ready operations haven't completed yet.
-         */
-        @Override
-        protected Future<ActorSelection> sendFindPrimaryShardAsync(final String shardName) {
-            // Check if there are any previous ready Futures, otherwise let the super class handle it.
-            if(previousReadyFutures.isEmpty()) {
-                return super.sendFindPrimaryShardAsync(shardName);
-            }
-
-            if(LOG.isDebugEnabled()) {
-                LOG.debug("Waiting for {} previous ready futures for Tx {} on chain {}",
-                        previousReadyFutures.size(), getIdentifier(), getTransactionChainId());
-            }
-
-            // Combine the ready Futures into 1.
-            Future<Iterable<ActorSelection>> combinedFutures = akka.dispatch.Futures.sequence(
-                    previousReadyFutures, getActorContext().getClientDispatcher());
-
-            // Add a callback for completion of the combined Futures.
-            final Promise<ActorSelection> returnPromise = akka.dispatch.Futures.promise();
-            OnComplete<Iterable<ActorSelection>> onComplete = new OnComplete<Iterable<ActorSelection>>() {
-                @Override
-                public void onComplete(Throwable failure, Iterable<ActorSelection> notUsed) {
-                    if(failure != null) {
-                        // A Ready Future failed so fail the returned Promise.
-                        returnPromise.failure(failure);
-                    } else {
-                        LOG.debug("Previous Tx readied - sending FindPrimaryShard for {} on chain {}",
-                                getIdentifier(), getTransactionChainId());
-
-                        // Send the FindPrimaryShard message and use the resulting Future to complete the
-                        // returned Promise.
-                        returnPromise.completeWith(ChainedTransactionProxy.super.sendFindPrimaryShardAsync(shardName));
-                    }
-                }
-            };
-
-            combinedFutures.onComplete(onComplete, getActorContext().getClientDispatcher());
-
-            return returnPromise.future();
-        }
-    }
 }
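
The removed ChainedTransactionProxy gates the next transaction's primary-shard lookup on the previous transaction's ready futures by sequencing them and bridging through a Promise. Below is a standalone sketch of that technique using the same Akka 2.3 Java API the hunk itself uses (Futures.sequence, Futures.promise, OnComplete); the helper name FutureGate.gateOn is invented for illustration.

    import akka.dispatch.Futures;
    import akka.dispatch.OnComplete;
    import java.util.List;
    import java.util.concurrent.Callable;
    import scala.concurrent.ExecutionContext;
    import scala.concurrent.Future;
    import scala.concurrent.Promise;

    /** Hypothetical helper: run 'next' only after all 'prerequisites' complete successfully. */
    final class FutureGate {
        private FutureGate() {
        }

        static <P, T> Future<T> gateOn(final List<Future<P>> prerequisites,
                final Callable<Future<T>> next, final ExecutionContext dispatcher) {
            if (prerequisites.isEmpty()) {
                // Nothing to wait for - invoke the follow-up immediately.
                try {
                    return next.call();
                } catch (Exception e) {
                    return Futures.failed(e);
                }
            }

            // Combine the prerequisite Futures into one.
            final Future<Iterable<P>> combined = Futures.sequence(prerequisites, dispatcher);

            // Bridge completion of the combined Future into the returned Promise.
            final Promise<T> returnPromise = Futures.promise();
            combined.onComplete(new OnComplete<Iterable<P>>() {
                @Override
                public void onComplete(final Throwable failure, final Iterable<P> notUsed) throws Throwable {
                    if (failure != null) {
                        // A prerequisite failed, so fail the returned Promise.
                        returnPromise.failure(failure);
                    } else {
                        returnPromise.completeWith(next.call());
                    }
                }
            }, dispatcher);

            return returnPromise.future();
        }
    }

This keeps transaction creation ordered along the chain without blocking any thread, which is the point of the overridden sendFindPrimaryShardAsync() shown above.
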
index 1b8e65e02d6d1bad037a02beaa77310088b6e67d..a5a7494e1a0930d6dab535b920198a1d4773f8a5 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.cluster.datastore;
 import akka.actor.ActorSelection;
 import com.google.common.base.Optional;
 import com.google.common.util.concurrent.SettableFuture;
-import java.util.List;
+import java.util.Collection;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import scala.concurrent.Future;
@@ -34,5 +34,5 @@ interface TransactionContext {
 
     void dataExists(YangInstanceIdentifier path, SettableFuture<Boolean> proxyFuture);
 
-    List<Future<Object>> getRecordedOperationFutures();
+    void copyRecordedOperationFutures(Collection<Future<Object>> target);
 }
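
The interface change above replaces a getter that returned the internal list with a copy-into-target method, so implementations never hand out their mutable state. A simplified sketch of how a base class along the lines of the AbstractTransactionContext referenced elsewhere in this patch (not the real class) can back that contract:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;
    import scala.concurrent.Future;

    /** Simplified sketch: record operation futures behind a copy-out API. */
    abstract class RecordingTransactionContext {
        private final List<Future<Object>> recordedOperationFutures = new ArrayList<>();

        protected final void recordOperationFuture(final Future<Object> future) {
            recordedOperationFutures.add(future);
        }

        protected final int recordedOperationCount() {
            return recordedOperationFutures.size();
        }

        /** Copies into the caller-supplied collection instead of exposing the internal list. */
        public final void copyRecordedOperationFutures(final Collection<Future<Object>> target) {
            target.addAll(recordedOperationFutures);
        }
    }

Callers such as readyTransaction() can then size their own list (recordedOperationCount() + 1) and append the final reply future, as the TransactionContextImpl hunks below do.
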
index 3a209630c3344ca149032c2cc1d4f06b134ccf42..c61682d8efe98cf1649bebc03b381b6afeeb1d76 100644 (file)
@@ -86,7 +86,7 @@ public class TransactionContextImpl extends AbstractTransactionContext {
 
     @Override
     public void closeTransaction() {
-        LOG.debug("Tx {} closeTransaction called", identifier);
+        LOG.debug("Tx {} closeTransaction called", getIdentifier());
 
         actorContext.sendOperationAsync(getActor(), CloseTransaction.INSTANCE.toSerializable());
     }
@@ -94,7 +94,7 @@ public class TransactionContextImpl extends AbstractTransactionContext {
     @Override
     public Future<ActorSelection> readyTransaction() {
         LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
-                identifier, recordedOperationFutures.size());
+            getIdentifier(), recordedOperationCount());
 
         // Send the remaining batched modifications if any.
 
@@ -113,8 +113,8 @@ public class TransactionContextImpl extends AbstractTransactionContext {
         // Future will fail. We need all prior operations and the ready operation to succeed
         // in order to attempt commit.
 
-        List<Future<Object>> futureList = Lists.newArrayListWithCapacity(recordedOperationFutures.size() + 1);
-        futureList.addAll(recordedOperationFutures);
+        List<Future<Object>> futureList = Lists.newArrayListWithCapacity(recordedOperationCount() + 1);
+        copyRecordedOperationFutures(futureList);
         futureList.add(withLastReplyFuture);
 
         Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(futureList,
@@ -127,7 +127,7 @@ public class TransactionContextImpl extends AbstractTransactionContext {
             @Override
             public ActorSelection checkedApply(Iterable<Object> notUsed) {
                 LOG.debug("Tx {} readyTransaction: pending recorded operations succeeded",
-                        identifier);
+                    getIdentifier());
 
                 // At this point all the Futures succeeded and we need to extract the cohort
                 // actor path from the ReadyTransactionReply. For the recorded operations, they
@@ -149,7 +149,7 @@ public class TransactionContextImpl extends AbstractTransactionContext {
                 } else {
                     // Throwing an exception here will fail the Future.
                     throw new IllegalArgumentException(String.format("%s: Invalid reply type %s",
-                            identifier, serializedReadyReply.getClass()));
+                        getIdentifier(), serializedReadyReply.getClass()));
                 }
             }
         }, TransactionProxy.SAME_FAILURE_TRANSFORMER, actorContext.getClientDispatcher());
@@ -161,7 +161,7 @@ public class TransactionContextImpl extends AbstractTransactionContext {
 
     private void batchModification(Modification modification) {
         if(batchedModifications == null) {
-            batchedModifications = new BatchedModifications(identifier.toString(), remoteTransactionVersion,
+            batchedModifications = new BatchedModifications(getIdentifier().toString(), remoteTransactionVersion,
                     transactionChainId);
         }
 
@@ -176,7 +176,7 @@ public class TransactionContextImpl extends AbstractTransactionContext {
     private void sendAndRecordBatchedModifications() {
         Future<Object> sentFuture = sendBatchedModifications();
         if(sentFuture != null) {
-            recordedOperationFutures.add(sentFuture);
+            recordOperationFuture(sentFuture);
         }
     }
 
@@ -188,14 +188,14 @@ public class TransactionContextImpl extends AbstractTransactionContext {
         Future<Object> sent = null;
         if(batchedModifications != null) {
             if(LOG.isDebugEnabled()) {
-                LOG.debug("Tx {} sending {} batched modifications, ready: {}", identifier,
+                LOG.debug("Tx {} sending {} batched modifications, ready: {}", getIdentifier(),
                         batchedModifications.getModifications().size(), ready);
             }
 
             batchedModifications.setReady(ready);
             sent = executeOperationAsync(batchedModifications);
 
-            batchedModifications = new BatchedModifications(identifier.toString(), remoteTransactionVersion,
+            batchedModifications = new BatchedModifications(getIdentifier().toString(), remoteTransactionVersion,
                     transactionChainId);
         }
 
@@ -204,89 +204,46 @@ public class TransactionContextImpl extends AbstractTransactionContext {
 
     @Override
     public void deleteData(YangInstanceIdentifier path) {
-        LOG.debug("Tx {} deleteData called path = {}", identifier, path);
+        LOG.debug("Tx {} deleteData called path = {}", getIdentifier(), path);
 
         batchModification(new DeleteModification(path));
     }
 
     @Override
     public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-        LOG.debug("Tx {} mergeData called path = {}", identifier, path);
+        LOG.debug("Tx {} mergeData called path = {}", getIdentifier(), path);
 
         batchModification(new MergeModification(path, data));
     }
 
     @Override
     public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-        LOG.debug("Tx {} writeData called path = {}", identifier, path);
+        LOG.debug("Tx {} writeData called path = {}", getIdentifier(), path);
 
         batchModification(new WriteModification(path, data));
     }
 
     @Override
-    public void readData(
-            final YangInstanceIdentifier path,final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture ) {
+    public void readData(final YangInstanceIdentifier path,
+            final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture ) {
 
-        LOG.debug("Tx {} readData called path = {}", identifier, path);
+        LOG.debug("Tx {} readData called path = {}", getIdentifier(), path);
 
-        // Send the remaining batched modifications if any.
+        // Send any batched modifications. This is necessary to honor the read uncommitted semantics of the
+        // public API contract.
 
         sendAndRecordBatchedModifications();
 
-        // If there were any previous recorded put/merge/delete operation reply Futures then we
-        // must wait for them to successfully complete. This is necessary to honor the read
-        // uncommitted semantics of the public API contract. If any one fails then fail the read.
-
-        if(recordedOperationFutures.isEmpty()) {
-            finishReadData(path, returnFuture);
-        } else {
-            LOG.debug("Tx {} readData: verifying {} previous recorded operations",
-                    identifier, recordedOperationFutures.size());
-
-            // Note: we make a copy of recordedOperationFutures to be on the safe side in case
-            // Futures#sequence accesses the passed List on a different thread, as
-            // recordedOperationFutures is not synchronized.
-
-            Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
-                    Lists.newArrayList(recordedOperationFutures),
-                    actorContext.getClientDispatcher());
-
-            OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
-                @Override
-                public void onComplete(Throwable failure, Iterable<Object> notUsed)
-                        throws Throwable {
-                    if(failure != null) {
-                        LOG.debug("Tx {} readData: a recorded operation failed: {}",
-                                identifier, failure);
-                        returnFuture.setException(new ReadFailedException(
-                                "The read could not be performed because a previous put, merge,"
-                                + "or delete operation failed", failure));
-                    } else {
-                        finishReadData(path, returnFuture);
-                    }
-                }
-            };
-
-            combinedFutures.onComplete(onComplete, actorContext.getClientDispatcher());
-        }
-
-    }
-
-    private void finishReadData(final YangInstanceIdentifier path,
-            final SettableFuture<Optional<NormalizedNode<?, ?>>> returnFuture) {
-
-        LOG.debug("Tx {} finishReadData called path = {}", identifier, path);
-
         OnComplete<Object> onComplete = new OnComplete<Object>() {
             @Override
             public void onComplete(Throwable failure, Object readResponse) throws Throwable {
                 if(failure != null) {
-                    LOG.debug("Tx {} read operation failed: {}", identifier, failure);
+                    LOG.debug("Tx {} read operation failed: {}", getIdentifier(), failure);
                     returnFuture.setException(new ReadFailedException(
                             "Error reading data for path " + path, failure));
 
                 } else {
-                    LOG.debug("Tx {} read operation succeeded", identifier, failure);
+                    LOG.debug("Tx {} read operation succeeded", getIdentifier(), failure);
 
                     if (readResponse instanceof ReadDataReply) {
                         ReadDataReply reply = (ReadDataReply) readResponse;
@@ -312,64 +269,22 @@ public class TransactionContextImpl extends AbstractTransactionContext {
     @Override
     public void dataExists(final YangInstanceIdentifier path, final SettableFuture<Boolean> returnFuture) {
 
-        LOG.debug("Tx {} dataExists called path = {}", identifier, path);
+        LOG.debug("Tx {} dataExists called path = {}", getIdentifier(), path);
 
-        // Send the remaining batched modifications if any.
+        // Send any batched modifications. This is necessary to honor the read uncommitted semantics of the
+        // public API contract.
 
         sendAndRecordBatchedModifications();
 
-        // If there were any previous recorded put/merge/delete operation reply Futures then we
-        // must wait for them to successfully complete. This is necessary to honor the read
-        // uncommitted semantics of the public API contract. If any one fails then fail this
-        // request.
-
-        if(recordedOperationFutures.isEmpty()) {
-            finishDataExists(path, returnFuture);
-        } else {
-            LOG.debug("Tx {} dataExists: verifying {} previous recorded operations",
-                    identifier, recordedOperationFutures.size());
-
-            // Note: we make a copy of recordedOperationFutures to be on the safe side in case
-            // Futures#sequence accesses the passed List on a different thread, as
-            // recordedOperationFutures is not synchronized.
-
-            Future<Iterable<Object>> combinedFutures = akka.dispatch.Futures.sequence(
-                    Lists.newArrayList(recordedOperationFutures),
-                    actorContext.getClientDispatcher());
-            OnComplete<Iterable<Object>> onComplete = new OnComplete<Iterable<Object>>() {
-                @Override
-                public void onComplete(Throwable failure, Iterable<Object> notUsed)
-                        throws Throwable {
-                    if(failure != null) {
-                        LOG.debug("Tx {} dataExists: a recorded operation failed: {}",
-                                identifier, failure);
-                        returnFuture.setException(new ReadFailedException(
-                                "The data exists could not be performed because a previous "
-                                + "put, merge, or delete operation failed", failure));
-                    } else {
-                        finishDataExists(path, returnFuture);
-                    }
-                }
-            };
-
-            combinedFutures.onComplete(onComplete, actorContext.getClientDispatcher());
-        }
-    }
-
-    private void finishDataExists(final YangInstanceIdentifier path,
-            final SettableFuture<Boolean> returnFuture) {
-
-        LOG.debug("Tx {} finishDataExists called path = {}", identifier, path);
-
         OnComplete<Object> onComplete = new OnComplete<Object>() {
             @Override
             public void onComplete(Throwable failure, Object response) throws Throwable {
                 if(failure != null) {
-                    LOG.debug("Tx {} dataExists operation failed: {}", identifier, failure);
+                    LOG.debug("Tx {} dataExists operation failed: {}", getIdentifier(), failure);
                     returnFuture.setException(new ReadFailedException(
                             "Error checking data exists for path " + path, failure));
                 } else {
-                    LOG.debug("Tx {} dataExists operation succeeded", identifier, failure);
+                    LOG.debug("Tx {} dataExists operation succeeded", getIdentifier(), failure);
 
                     if (response instanceof DataExistsReply) {
                         returnFuture.set(Boolean.valueOf(((DataExistsReply) response).exists()));
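
The TransactionContextImpl hunks above funnel write/merge/delete through batchModification(), flushing a BatchedModifications message when the batch fills up or when a read or ready forces it out. The following is a generic sketch of that batch-and-flush pattern with invented names (Batcher, FlushSink), independent of the actual message classes.

    import java.util.ArrayList;
    import java.util.List;

    /** Hypothetical sketch of the batch-and-flush pattern used for modifications. */
    final class Batcher<T> {
        interface FlushSink<T> {
            /** Sends one batch downstream, e.g. as a single actor message. */
            void send(List<T> batch, boolean ready);
        }

        private final int batchSize;
        private final FlushSink<T> sink;
        private List<T> current;

        Batcher(final int batchSize, final FlushSink<T> sink) {
            this.batchSize = batchSize;
            this.sink = sink;
        }

        /** Mirrors batchModification(): buffer the item, flush when the batch is full. */
        void add(final T item) {
            if (current == null) {
                current = new ArrayList<>(batchSize);
            }
            current.add(item);
            if (current.size() >= batchSize) {
                flush(false);
            }
        }

        /** Mirrors sendBatchedModifications(ready): flush whatever is pending. */
        void flush(final boolean ready) {
            if (current != null) {
                sink.send(current, ready);
                current = null;
            }
        }
    }

A read path would call flush(false) first to preserve read-your-writes ordering, which is why readData() and dataExists() above call sendAndRecordBatchedModifications() before issuing the read.
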
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionOperation.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionOperation.java
new file mode 100644 (file)
index 0000000..dc965ed
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+/**
+ * Abstract superclass for transaction operations which should be executed
+ * on a {@link TransactionContext} at a later point in time.
+ */
+abstract class TransactionOperation {
+    /**
+     * Execute the delayed operation.
+     *
+     * @param transactionContext
+     */
+    protected abstract void invoke(TransactionContext transactionContext);
+}
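
The new TransactionOperation base class lets TransactionProxy queue work until the per-shard TransactionContext is resolved. Below is an illustrative concrete subclass; DeleteOperation is a hypothetical example matching the shape of the anonymous subclasses TransactionProxy uses further down, not part of the patch.

    import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;

    /** Illustrative concrete TransactionOperation (hypothetical, not part of the patch). */
    final class DeleteOperation extends TransactionOperation {
        private final YangInstanceIdentifier path;

        DeleteOperation(final YangInstanceIdentifier path) {
            this.path = path;
        }

        @Override
        protected void invoke(final TransactionContext transactionContext) {
            // Runs once the per-shard TransactionContext has been created (or replaced by a
            // no-op context on failure); until then the operation waits in the queue.
            transactionContext.deleteData(path);
        }
    }
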
index 64f914b19fbebfa517afb2a7f23deb473a46a1a5..59c9298499c4ed0a58961b57fe40019f15214de1 100644 (file)
@@ -12,21 +12,16 @@ import akka.actor.ActorSelection;
 import akka.dispatch.Mapper;
 import akka.dispatch.OnComplete;
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.FinalizablePhantomReference;
-import com.google.common.base.FinalizableReferenceQueue;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.util.concurrent.CheckedFuture;
-import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.SettableFuture;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Semaphore;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -35,14 +30,13 @@ import javax.annotation.concurrent.GuardedBy;
 import org.opendaylight.controller.cluster.datastore.compat.PreLithiumTransactionContextImpl;
 import org.opendaylight.controller.cluster.datastore.exceptions.NoShardLeaderException;
 import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
-import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.CreateTransactionReply;
 import org.opendaylight.controller.cluster.datastore.shardstrategy.ShardStrategyFactory;
 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.yangtools.util.concurrent.MappingCheckedFuture;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
@@ -65,26 +59,31 @@ import scala.concurrent.duration.FiniteDuration;
  * shards will be executed.
  * </p>
  */
-public class TransactionProxy implements DOMStoreReadWriteTransaction {
+public class TransactionProxy extends AbstractDOMStoreTransaction<TransactionIdentifier> implements DOMStoreReadWriteTransaction {
 
     public static enum TransactionType {
         READ_ONLY,
         WRITE_ONLY,
         READ_WRITE;
 
-        public static TransactionType fromInt(int type) {
-            if(type == WRITE_ONLY.ordinal()) {
-                return WRITE_ONLY;
-            } else if(type == READ_WRITE.ordinal()) {
-                return READ_WRITE;
-            } else if(type == READ_ONLY.ordinal()) {
-                return READ_ONLY;
-            } else {
-                throw new IllegalArgumentException("In TransactionType enum value" + type);
+        // Cache all values
+        private static final TransactionType[] VALUES = values();
+
+        public static TransactionType fromInt(final int type) {
+            try {
+                return VALUES[type];
+            } catch (IndexOutOfBoundsException e) {
+                throw new IllegalArgumentException("In TransactionType enum value " + type, e);
             }
         }
     }
 
+    private static enum TransactionState {
+        OPEN,
+        READY,
+        CLOSED,
+    }
+
     static final Mapper<Throwable, Throwable> SAME_FAILURE_TRANSFORMER =
                                                               new Mapper<Throwable, Throwable>() {
         @Override
@@ -103,72 +102,6 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
     private static final FiniteDuration CREATE_TX_TRY_INTERVAL =
             FiniteDuration.create(1, TimeUnit.SECONDS);
 
-    /**
-     * Used to enqueue the PhantomReferences for read-only TransactionProxy instances. The
-     * FinalizableReferenceQueue is safe to use statically in an OSGi environment as it uses some
-     * trickery to clean up its internal thread when the bundle is unloaded.
-     */
-    private static final FinalizableReferenceQueue phantomReferenceQueue =
-                                                                  new FinalizableReferenceQueue();
-
-    /**
-     * This stores the TransactionProxyCleanupPhantomReference instances statically, This is
-     * necessary because PhantomReferences need a hard reference so they're not garbage collected.
-     * Once finalized, the TransactionProxyCleanupPhantomReference removes itself from this map
-     * and thus becomes eligible for garbage collection.
-     */
-    private static final Map<TransactionProxyCleanupPhantomReference,
-                             TransactionProxyCleanupPhantomReference> phantomReferenceCache =
-                                                                        new ConcurrentHashMap<>();
-
-    /**
-     * A PhantomReference that closes remote transactions for a TransactionProxy when it's
-     * garbage collected. This is used for read-only transactions as they're not explicitly closed
-     * by clients. So the only way to detect that a transaction is no longer in use and it's safe
-     * to clean up is when it's garbage collected. It's inexact as to when an instance will be GC'ed
-     * but TransactionProxy instances should generally be short-lived enough to avoid being moved
-     * to the old generation space and thus should be cleaned up in a timely manner as the GC
-     * runs on the young generation (eden, swap1...) space much more frequently.
-     */
-    private static class TransactionProxyCleanupPhantomReference
-                                           extends FinalizablePhantomReference<TransactionProxy> {
-
-        private final List<ActorSelection> remoteTransactionActors;
-        private final AtomicBoolean remoteTransactionActorsMB;
-        private final ActorContext actorContext;
-        private final TransactionIdentifier identifier;
-
-        protected TransactionProxyCleanupPhantomReference(TransactionProxy referent) {
-            super(referent, phantomReferenceQueue);
-
-            // Note we need to cache the relevant fields from the TransactionProxy as we can't
-            // have a hard reference to the TransactionProxy instance itself.
-
-            remoteTransactionActors = referent.remoteTransactionActors;
-            remoteTransactionActorsMB = referent.remoteTransactionActorsMB;
-            actorContext = referent.actorContext;
-            identifier = referent.identifier;
-        }
-
-        @Override
-        public void finalizeReferent() {
-            LOG.trace("Cleaning up {} Tx actors for TransactionProxy {}",
-                    remoteTransactionActors.size(), identifier);
-
-            phantomReferenceCache.remove(this);
-
-            // Access the memory barrier volatile to ensure all previous updates to the
-            // remoteTransactionActors list are visible to this thread.
-
-            if(remoteTransactionActorsMB.get()) {
-                for(ActorSelection actor : remoteTransactionActors) {
-                    LOG.trace("Sending CloseTransaction to {}", actor);
-                    actorContext.sendOperationAsync(actor, CloseTransaction.INSTANCE.toSerializable());
-                }
-            }
-        }
-    }
-
     /**
      * Stores the remote Tx actors for each requested data store path to be used by the
      * PhantomReference to close the remote Tx's. This is only used for read-only Tx's. The
@@ -176,8 +109,8 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
      * remoteTransactionActors list so they will be visible to the thread accessing the
      * PhantomReference.
      */
-    private List<ActorSelection> remoteTransactionActors;
-    private volatile AtomicBoolean remoteTransactionActorsMB;
+    List<ActorSelection> remoteTransactionActors;
+    volatile AtomicBoolean remoteTransactionActorsMB;
 
     /**
      * Stores the create transaction results per shard.
@@ -185,11 +118,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
     private final Map<String, TransactionFutureCallback> txFutureCallbackMap = new HashMap<>();
 
     private final TransactionType transactionType;
-    private final ActorContext actorContext;
-    private final TransactionIdentifier identifier;
+    final ActorContext actorContext;
     private final String transactionChainId;
     private final SchemaContext schemaContext;
-    private boolean inReadyState;
+    private TransactionState state = TransactionState.OPEN;
 
     private volatile boolean initialized;
     private Semaphore operationLimiter;
@@ -199,8 +131,8 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         this(actorContext, transactionType, "");
     }
 
-    public TransactionProxy(ActorContext actorContext, TransactionType transactionType,
-            String transactionChainId) {
+    public TransactionProxy(ActorContext actorContext, TransactionType transactionType, String transactionChainId) {
+        super(createIdentifier(actorContext));
         this.actorContext = Preconditions.checkNotNull(actorContext,
             "actorContext should not be null");
         this.transactionType = Preconditions.checkNotNull(transactionType,
@@ -209,14 +141,16 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             "schemaContext should not be null");
         this.transactionChainId = transactionChainId;
 
+        LOG.debug("Created txn {} of type {} on chain {}", getIdentifier(), transactionType, transactionChainId);
+    }
+
+    private static TransactionIdentifier createIdentifier(ActorContext actorContext) {
         String memberName = actorContext.getCurrentMemberName();
-        if(memberName == null){
+        if (memberName == null) {
             memberName = "UNKNOWN-MEMBER";
         }
 
-        this.identifier = new TransactionIdentifier(memberName, counter.getAndIncrement());
-
-        LOG.debug("Created txn {} of type {} on chain {}", identifier, transactionType, transactionChainId);
+        return new TransactionIdentifier(memberName, counter.getAndIncrement());
     }
 
     @VisibleForTesting
@@ -224,8 +158,8 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         List<Future<Object>> recordedOperationFutures = Lists.newArrayList();
         for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
             TransactionContext transactionContext = txFutureCallback.getTransactionContext();
-            if(transactionContext != null) {
-                recordedOperationFutures.addAll(transactionContext.getRecordedOperationFutures());
+            if (transactionContext != null) {
+                transactionContext.copyRecordedOperationFutures(recordedOperationFutures);
             }
         }
 
@@ -250,7 +184,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
                 "Read operation on write-only transaction is not allowed");
 
-        LOG.debug("Tx {} read {}", identifier, path);
+        LOG.debug("Tx {} read {}", getIdentifier(), path);
 
         throttleOperation();
 
@@ -273,7 +207,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         Preconditions.checkState(transactionType != TransactionType.WRITE_ONLY,
                 "Exists operation on write-only transaction is not allowed");
 
-        LOG.debug("Tx {} exists {}", identifier, path);
+        LOG.debug("Tx {} exists {}", getIdentifier(), path);
 
         throttleOperation();
 
@@ -293,7 +227,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
     private void checkModificationState() {
         Preconditions.checkState(transactionType != TransactionType.READ_ONLY,
                 "Modification operation on read-only transaction is not allowed");
-        Preconditions.checkState(!inReadyState,
+        Preconditions.checkState(state == TransactionState.OPEN,
                 "Transaction is sealed - further modifications are not allowed");
     }
 
@@ -326,13 +260,12 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         }
     }
 
-
     @Override
     public void write(final YangInstanceIdentifier path, final NormalizedNode<?, ?> data) {
 
         checkModificationState();
 
-        LOG.debug("Tx {} write {}", identifier, path);
+        LOG.debug("Tx {} write {}", getIdentifier(), path);
 
         throttleOperation();
 
@@ -350,7 +283,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         checkModificationState();
 
-        LOG.debug("Tx {} merge {}", identifier, path);
+        LOG.debug("Tx {} merge {}", getIdentifier(), path);
 
         throttleOperation();
 
@@ -368,7 +301,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
 
         checkModificationState();
 
-        LOG.debug("Tx {} delete {}", identifier, path);
+        LOG.debug("Tx {} delete {}", getIdentifier(), path);
 
         throttleOperation();
 
@@ -381,28 +314,37 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         });
     }
 
-    @Override
-    public DOMStoreThreePhaseCommitCohort ready() {
+    private boolean seal(final TransactionState newState) {
+        if (state == TransactionState.OPEN) {
+            state = newState;
+            return true;
+        } else {
+            return false;
+        }
+    }
 
-        checkModificationState();
+    @Override
+    public AbstractThreePhaseCommitCohort ready() {
+        Preconditions.checkState(transactionType != TransactionType.READ_ONLY,
+                "Read-only transactions cannot be readied");
 
-        inReadyState = true;
+        final boolean success = seal(TransactionState.READY);
+        Preconditions.checkState(success, "Transaction %s is %s, it cannot be readied", getIdentifier(), state);
 
-        LOG.debug("Tx {} Readying {} transactions for commit", identifier,
+        LOG.debug("Tx {} Readying {} transactions for commit", getIdentifier(),
                     txFutureCallbackMap.size());
 
-        if(txFutureCallbackMap.size() == 0) {
-            onTransactionReady(Collections.<Future<ActorSelection>>emptyList());
+        if (txFutureCallbackMap.isEmpty()) {
+            TransactionRateLimitingCallback.adjustRateLimitForUnusedTransaction(actorContext);
             return NoOpDOMStoreThreePhaseCommitCohort.INSTANCE;
         }
 
         throttleOperation(txFutureCallbackMap.size());
 
-        List<Future<ActorSelection>> cohortFutures = Lists.newArrayList();
-
+        List<Future<ActorSelection>> cohortFutures = new ArrayList<>(txFutureCallbackMap.size());
         for(TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
 
-            LOG.debug("Tx {} Readying transaction for shard {} chain {}", identifier,
+            LOG.debug("Tx {} Readying transaction for shard {} chain {}", getIdentifier(),
                         txFutureCallback.getShardName(), transactionChainId);
 
             final TransactionContext transactionContext = txFutureCallback.getTransactionContext();
@@ -424,27 +366,22 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             cohortFutures.add(future);
         }
 
-        onTransactionReady(cohortFutures);
-
         return new ThreePhaseCommitCohortProxy(actorContext, cohortFutures,
-                identifier.toString());
-    }
-
-    /**
-     * Method for derived classes to be notified when the transaction has been readied.
-     *
-     * @param cohortFutures the cohort Futures for each shard transaction.
-     */
-    protected void onTransactionReady(List<Future<ActorSelection>> cohortFutures) {
-    }
-
-    @Override
-    public Object getIdentifier() {
-        return this.identifier;
+            getIdentifier().toString());
     }
 
     @Override
     public void close() {
+        if (!seal(TransactionState.CLOSED)) {
+            if (state == TransactionState.CLOSED) {
+                // Idempotent no-op as per AutoCloseable recommendation
+                return;
+            }
+
+            throw new IllegalStateException(String.format("Transaction %s is ready, it cannot be closed",
+                getIdentifier()));
+        }
+
         for (TransactionFutureCallback txFutureCallback : txFutureCallbackMap.values()) {
             txFutureCallback.enqueueTransactionOperation(new TransactionOperation() {
                 @Override
@@ -504,13 +441,6 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         return actorContext;
     }
 
-    /**
-     * Interfaces for transaction operations to be invoked later.
-     */
-    private static interface TransactionOperation {
-        void invoke(TransactionContext transactionContext);
-    }
-
     /**
      * Implements a Future OnComplete callback for a CreateTransaction message. This class handles
      * retries, up to a limit, if the shard doesn't have a leader yet. This is done by scheduling a
@@ -567,7 +497,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             if(transactionType == TransactionType.WRITE_ONLY &&
                     actorContext.getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
                 LOG.debug("Tx {} Primary shard {} found - creating WRITE_ONLY transaction context",
-                        identifier, primaryShard);
+                    getIdentifier(), primaryShard);
 
                 // For write-only Tx's we prepare the transaction modifications directly on the shard actor
                 // to avoid the overhead of creating a separate transaction actor.
@@ -586,7 +516,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             boolean invokeOperation = true;
             synchronized(txOperationsOnComplete) {
                 if(transactionContext == null) {
-                    LOG.debug("Tx {} Adding operation on complete", identifier);
+                    LOG.debug("Tx {} Adding operation on complete", getIdentifier());
 
                     invokeOperation = false;
                     txOperationsOnComplete.add(operation);
@@ -614,10 +544,10 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
          */
         private void tryCreateTransaction() {
             if(LOG.isDebugEnabled()) {
-                LOG.debug("Tx {} Primary shard {} found - trying create transaction", identifier, primaryShard);
+                LOG.debug("Tx {} Primary shard {} found - trying create transaction", getIdentifier(), primaryShard);
             }
 
-            Object serializedCreateMessage = new CreateTransaction(identifier.toString(),
+            Object serializedCreateMessage = new CreateTransaction(getIdentifier().toString(),
                     TransactionProxy.this.transactionType.ordinal(),
                     getTransactionChainId()).toSerializable();
 
@@ -635,7 +565,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
                 // is ok.
                 if(--createTxTries > 0) {
                     LOG.debug("Tx {} Shard {} has no leader yet - scheduling create Tx retry",
-                            identifier, shardName);
+                        getIdentifier(), shardName);
 
                     actorContext.getActorSystem().scheduler().scheduleOnce(CREATE_TX_TRY_INTERVAL,
                             new Runnable() {
@@ -667,17 +597,17 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             // TransactionContext until after we've executed all cached TransactionOperations.
             TransactionContext localTransactionContext;
             if(failure != null) {
-                LOG.debug("Tx {} Creating NoOpTransaction because of error", identifier, failure);
+                LOG.debug("Tx {} Creating NoOpTransaction because of error", getIdentifier(), failure);
 
-                localTransactionContext = new NoOpTransactionContext(failure, identifier, operationLimiter);
-            } else if (response.getClass().equals(CreateTransactionReply.SERIALIZABLE_CLASS)) {
+                localTransactionContext = new NoOpTransactionContext(failure, getIdentifier(), operationLimiter);
+            } else if (CreateTransactionReply.SERIALIZABLE_CLASS.equals(response.getClass())) {
                 localTransactionContext = createValidTransactionContext(
                         CreateTransactionReply.fromSerializable(response));
             } else {
                 IllegalArgumentException exception = new IllegalArgumentException(String.format(
                         "Invalid reply type %s for CreateTransaction", response.getClass()));
 
-                localTransactionContext = new NoOpTransactionContext(exception, identifier, operationLimiter);
+                localTransactionContext = new NoOpTransactionContext(exception, getIdentifier(), operationLimiter);
             }
 
             executeTxOperatonsOnComplete(localTransactionContext);
@@ -717,7 +647,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
         }
 
         private TransactionContext createValidTransactionContext(CreateTransactionReply reply) {
-            LOG.debug("Tx {} Received {}", identifier, reply);
+            LOG.debug("Tx {} Received {}", getIdentifier(), reply);
 
             return createValidTransactionContext(actorContext.actorSelection(reply.getTransactionPath()),
                     reply.getTransactionPath(), reply.getVersion());
@@ -735,9 +665,7 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
                     remoteTransactionActors = Lists.newArrayList();
                     remoteTransactionActorsMB = new AtomicBoolean();
 
-                    TransactionProxyCleanupPhantomReference cleanup =
-                            new TransactionProxyCleanupPhantomReference(TransactionProxy.this);
-                    phantomReferenceCache.put(cleanup, cleanup);
+                    TransactionProxyCleanupPhantomReference.track(TransactionProxy.this);
                 }
 
                 // Add the actor to the remoteTransactionActors list for access by the
@@ -754,49 +682,17 @@ public class TransactionProxy implements DOMStoreReadWriteTransaction {
             boolean isTxActorLocal = actorContext.isPathLocal(transactionPath);
 
             if(remoteTransactionVersion < DataStoreVersions.LITHIUM_VERSION) {
-                return new PreLithiumTransactionContextImpl(transactionPath, transactionActor, identifier,
+                return new PreLithiumTransactionContextImpl(transactionPath, transactionActor, getIdentifier(),
                         transactionChainId, actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion,
                         operationCompleter);
             } else if (transactionType == TransactionType.WRITE_ONLY &&
                     actorContext.getDatastoreContext().isWriteOnlyTransactionOptimizationsEnabled()) {
-                return new WriteOnlyTransactionContextImpl(transactionActor, identifier, transactionChainId,
+                return new WriteOnlyTransactionContextImpl(transactionActor, getIdentifier(), transactionChainId,
                     actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion, operationCompleter);
             } else {
-                return new TransactionContextImpl(transactionActor, identifier, transactionChainId,
+                return new TransactionContextImpl(transactionActor, getIdentifier(), transactionChainId,
                         actorContext, schemaContext, isTxActorLocal, remoteTransactionVersion, operationCompleter);
             }
         }
     }
-
-    private static class NoOpDOMStoreThreePhaseCommitCohort implements DOMStoreThreePhaseCommitCohort {
-        static NoOpDOMStoreThreePhaseCommitCohort INSTANCE = new NoOpDOMStoreThreePhaseCommitCohort();
-
-        private static final ListenableFuture<Void> IMMEDIATE_VOID_SUCCESS =
-                com.google.common.util.concurrent.Futures.immediateFuture(null);
-        private static final ListenableFuture<Boolean> IMMEDIATE_BOOLEAN_SUCCESS =
-                com.google.common.util.concurrent.Futures.immediateFuture(Boolean.TRUE);
-
-        private NoOpDOMStoreThreePhaseCommitCohort() {
-        }
-
-        @Override
-        public ListenableFuture<Boolean> canCommit() {
-            return IMMEDIATE_BOOLEAN_SUCCESS;
-        }
-
-        @Override
-        public ListenableFuture<Void> preCommit() {
-            return IMMEDIATE_VOID_SUCCESS;
-        }
-
-        @Override
-        public ListenableFuture<Void> abort() {
-            return IMMEDIATE_VOID_SUCCESS;
-        }
-
-        @Override
-        public ListenableFuture<Void> commit() {
-            return IMMEDIATE_VOID_SUCCESS;
-        }
-    }
 }
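
TransactionProxy now tracks an explicit OPEN/READY/CLOSED lifecycle through seal(), making ready() a one-shot transition and close() idempotent. A compact, standalone sketch of that state machine follows; LifecycleGuard is a hypothetical name and the class is not the real TransactionProxy.

    /** Hypothetical sketch of the OPEN -> READY/CLOSED lifecycle enforced by seal(). */
    final class LifecycleGuard {
        enum State { OPEN, READY, CLOSED }

        private State state = State.OPEN;

        /** Transition out of OPEN exactly once; returns false if already sealed. */
        private boolean seal(final State newState) {
            if (state == State.OPEN) {
                state = newState;
                return true;
            }
            return false;
        }

        void checkModifiable() {
            if (state != State.OPEN) {
                throw new IllegalStateException("Transaction is sealed - further modifications are not allowed");
            }
        }

        void ready() {
            if (!seal(State.READY)) {
                throw new IllegalStateException("Transaction is " + state + ", it cannot be readied");
            }
            // ... build and return the commit cohort here ...
        }

        void close() {
            if (!seal(State.CLOSED)) {
                if (state == State.CLOSED) {
                    // Idempotent no-op, matching the AutoCloseable recommendation.
                    return;
                }
                throw new IllegalStateException("Transaction is ready, it cannot be closed");
            }
            // ... release per-shard resources here ...
        }
    }
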
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxyCleanupPhantomReference.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/TransactionProxyCleanupPhantomReference.java
new file mode 100644 (file)
index 0000000..77834d9
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore;
+
+import akka.actor.ActorSelection;
+import com.google.common.base.FinalizablePhantomReference;
+import com.google.common.base.FinalizableReferenceQueue;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.opendaylight.controller.cluster.datastore.identifiers.TransactionIdentifier;
+import org.opendaylight.controller.cluster.datastore.messages.CloseTransaction;
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A PhantomReference that closes remote transactions for a TransactionProxy when it's
+ * garbage collected. This is used for read-only transactions as they're not explicitly closed
+ * by clients. So the only way to detect that a transaction is no longer in use and it's safe
+ * to clean up is when it's garbage collected. It's inexact as to when an instance will be GC'ed
+ * but TransactionProxy instances should generally be short-lived enough to avoid being moved
+ * to the old generation space and thus should be cleaned up in a timely manner, as the GC
+ * runs on the young generation (eden, survivor) spaces much more frequently.
+ */
+final class TransactionProxyCleanupPhantomReference
+                                       extends FinalizablePhantomReference<TransactionProxy> {
+    private static final Logger LOG = LoggerFactory.getLogger(TransactionProxyCleanupPhantomReference.class);
+    /**
+     * Used to enqueue the PhantomReferences for read-only TransactionProxy instances. The
+     * FinalizableReferenceQueue is safe to use statically in an OSGi environment as it uses some
+     * trickery to clean up its internal thread when the bundle is unloaded.
+     */
+    private static final FinalizableReferenceQueue phantomReferenceQueue =
+                                                                  new FinalizableReferenceQueue();
+
+    /**
+     * This stores the TransactionProxyCleanupPhantomReference instances statically. This is
+     * necessary because PhantomReferences need a hard reference so they're not garbage collected.
+     * Once finalized, the TransactionProxyCleanupPhantomReference removes itself from this map
+     * and thus becomes eligible for garbage collection.
+     */
+    private static final Map<TransactionProxyCleanupPhantomReference,
+                             TransactionProxyCleanupPhantomReference> phantomReferenceCache =
+                                                                        new ConcurrentHashMap<>();
+
+    private final List<ActorSelection> remoteTransactionActors;
+    private final AtomicBoolean remoteTransactionActorsMB;
+    private final ActorContext actorContext;
+    private final TransactionIdentifier identifier;
+
+    private TransactionProxyCleanupPhantomReference(TransactionProxy referent) {
+        super(referent, phantomReferenceQueue);
+
+        // Note we need to cache the relevant fields from the TransactionProxy as we can't
+        // have a hard reference to the TransactionProxy instance itself.
+
+        remoteTransactionActors = referent.remoteTransactionActors;
+        remoteTransactionActorsMB = referent.remoteTransactionActorsMB;
+        actorContext = referent.actorContext;
+        identifier = referent.getIdentifier();
+    }
+
+    static void track(TransactionProxy referent) {
+        final TransactionProxyCleanupPhantomReference ret = new TransactionProxyCleanupPhantomReference(referent);
+        phantomReferenceCache.put(ret, ret);
+    }
+
+    @Override
+    public void finalizeReferent() {
+        LOG.trace("Cleaning up {} Tx actors for TransactionProxy {}",
+                remoteTransactionActors.size(), identifier);
+
+        phantomReferenceCache.remove(this);
+
+        // Access the memory barrier volatile to ensure all previous updates to the
+        // remoteTransactionActors list are visible to this thread.
+
+        if(remoteTransactionActorsMB.get()) {
+            for(ActorSelection actor : remoteTransactionActors) {
+                LOG.trace("Sending CloseTransaction to {}", actor);
+                actorContext.sendOperationAsync(actor, CloseTransaction.INSTANCE.toSerializable());
+            }
+        }
+    }
+}
\ No newline at end of file
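For context, here is a minimal, self-contained sketch of the same Guava pattern in isolation (class and method names are illustrative, not part of this change): a FinalizableReferenceQueue plus a FinalizablePhantomReference runs cleanup logic once the referent becomes unreachable, provided the reference object itself is kept strongly reachable.

    import com.google.common.base.FinalizablePhantomReference;
    import com.google.common.base.FinalizableReferenceQueue;
    import java.util.Collections;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    final class PhantomCleanupExample {
        private static final FinalizableReferenceQueue QUEUE = new FinalizableReferenceQueue();

        // Hard references to the phantom references themselves; without these they would be
        // collected before finalizeReferent() ever runs (same role as phantomReferenceCache).
        private static final Set<FinalizablePhantomReference<?>> REFERENCES =
                Collections.newSetFromMap(new ConcurrentHashMap<FinalizablePhantomReference<?>, Boolean>());

        static void track(final Object referent, final Runnable cleanup) {
            REFERENCES.add(new FinalizablePhantomReference<Object>(referent, QUEUE) {
                @Override
                public void finalizeReferent() {
                    REFERENCES.remove(this); // make this reference collectible as well
                    cleanup.run();           // e.g. send CloseTransaction to the remote Tx actors
                }
            });
        }
    }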
index 1202a909d54bfb8faf69adce2a47cded7391853d..526ce59aabe370246b2742bf068d25b9befcc764 100644 (file)
@@ -44,11 +44,30 @@ public class TransactionRateLimitingCallback implements OperationCallback{
         Preconditions.checkState(timerContext != null, "Call run before success");
         timerContext.stop();
 
+        double newRateLimit = calculateNewRateLimit(commitTimer, actorContext.getDatastoreContext());
+
+        LOG.debug("Data Store {} commit rateLimit adjusted to {}", actorContext.getDataStoreType(), newRateLimit);
+
+        actorContext.setTxCreationLimit(newRateLimit);
+    }
+
+    @Override
+    public void failure() {
+        // A failure means a transaction did not complete within the transaction commit timeout
+        // (30 seconds by default). Timing data from a timed-out transaction is not useful for
+        // deriving a rate limit, so the current limit is left unchanged.
+    }
+
+    private static double calculateNewRateLimit(Timer commitTimer, DatastoreContext context) {
+        if(commitTimer == null) {
+            // This can happen in unit tests.
+            return 0;
+        }
+
         Snapshot timerSnapshot = commitTimer.getSnapshot();
         double newRateLimit = 0;
 
-        long commitTimeoutInSeconds = actorContext.getDatastoreContext()
-                .getShardTransactionCommitTimeoutInSeconds();
+        long commitTimeoutInSeconds = context.getShardTransactionCommitTimeoutInSeconds();
         long commitTimeoutInNanos = TimeUnit.SECONDS.toNanos(commitTimeoutInSeconds);
 
         // Find the time that it takes for transactions to get executed in every 10th percentile
@@ -59,7 +78,7 @@ public class TransactionRateLimitingCallback implements OperationCallback{
 
             if(percentileTimeInNanos > 0) {
                 // Figure out the rate limit for the i*10th percentile in nanos
-                double percentileRateLimit = ((double) commitTimeoutInNanos / percentileTimeInNanos);
+                double percentileRateLimit = (commitTimeoutInNanos / percentileTimeInNanos);
 
                 // Add the percentileRateLimit to the total rate limit
                 newRateLimit += percentileRateLimit;
@@ -67,17 +86,38 @@ public class TransactionRateLimitingCallback implements OperationCallback{
         }
 
         // Compute the rate limit per second
-        newRateLimit = newRateLimit/(commitTimeoutInSeconds*10);
-
-        LOG.debug("Data Store {} commit rateLimit adjusted to {}", actorContext.getDataStoreType(), newRateLimit);
-
-        actorContext.setTxCreationLimit(newRateLimit);
+        return newRateLimit/(commitTimeoutInSeconds*10);
     }
 
-    @Override
-    public void failure() {
-        // This would mean we couldn't get a transaction completed in 30 seconds which is
-        // the default transaction commit timeout. Using the timeout information to figure out the rate limit is
-        // not going to be useful - so we leave it as it is
+    public static void adjustRateLimitForUnusedTransaction(ActorContext actorContext) {
+        // Unused transactions in one data store can artificially throttle other data stores if that
+        // data store is still at its lower initial rate: the front-end creates transactions in every
+        // data store up-front even when the client never submits changes to some of them.
+        // So the rate for data stores with unused transactions may need to be adjusted here.
+
+        // First calculate the current rate for the data store. If it's 0 then there have been no
+        // actual transactions committed to the data store.
+
+        double newRateLimit = calculateNewRateLimit(actorContext.getOperationTimer(COMMIT),
+                actorContext.getDatastoreContext());
+        if(newRateLimit == 0.0) {
+            // Since we have no rate data for unused Tx's data store, adjust to the rate from another
+            // data store that does have rate data.
+            for(String datastoreType: DatastoreContext.getGlobalDatastoreTypes()) {
+                if(datastoreType.equals(actorContext.getDataStoreType())) {
+                    continue;
+                }
+
+                newRateLimit = calculateNewRateLimit(actorContext.getOperationTimer(datastoreType, COMMIT),
+                        actorContext.getDatastoreContext());
+                if(newRateLimit > 0.0) {
+                    LOG.debug("On unused Tx - data Store {} commit rateLimit adjusted to {}",
+                            actorContext.getDataStoreType(), newRateLimit);
+
+                    actorContext.setTxCreationLimit(newRateLimit);
+                    break;
+                }
+            }
+        }
     }
 }
\ No newline at end of file
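A worked example of the rate-limit arithmetic above, using assumed numbers (30 second commit timeout, every 10th percentile of the commit timer at 500 microseconds); this is a sketch for illustration, not part of the patch:

    import java.util.concurrent.TimeUnit;

    final class RateLimitMath {
        public static void main(String[] args) {
            long commitTimeoutInNanos = TimeUnit.SECONDS.toNanos(30);           // 30,000,000,000 ns
            double percentileTimeInNanos = TimeUnit.MICROSECONDS.toNanos(500);  // 500,000 ns (assumed, same at every percentile)

            double newRateLimit = 0;
            for (int i = 1; i <= 10; i++) {
                // each 10th percentile contributes timeout / percentile-time = 60,000
                newRateLimit += commitTimeoutInNanos / percentileTimeInNanos;
            }

            // per-second limit: 600,000 / (30 * 10) = 2,000 transactions per second
            System.out.println(newRateLimit / (30 * 10));
        }
    }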
index 3b4a190a9ea419ce07bf041dbc2563335402a903..e1313540c44868e7f8d81dc4f0a857411bf20a15 100644 (file)
@@ -33,7 +33,7 @@ public class WriteOnlyTransactionContextImpl extends TransactionContextImpl {
     @Override
     public Future<ActorSelection> readyTransaction() {
         LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
-                identifier, recordedOperationFutures.size());
+            getIdentifier(), recordedOperationCount());
 
         // Send the remaining batched modifications if any.
 
index ccfb32969287291b941861ecaefe1c1f25df5613..c3450333a46447d50aa16f6021fc2d48592a768b 100644 (file)
@@ -45,26 +45,26 @@ public class PreLithiumTransactionContextImpl extends TransactionContextImpl {
 
     @Override
     public void deleteData(YangInstanceIdentifier path) {
-        recordedOperationFutures.add(executeOperationAsync(
+        recordOperationFuture(executeOperationAsync(
                 new DeleteData(path, getRemoteTransactionVersion())));
     }
 
     @Override
     public void mergeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-        recordedOperationFutures.add(executeOperationAsync(
+        recordOperationFuture(executeOperationAsync(
                 new MergeData(path, data, getRemoteTransactionVersion())));
     }
 
     @Override
     public void writeData(YangInstanceIdentifier path, NormalizedNode<?, ?> data) {
-        recordedOperationFutures.add(executeOperationAsync(
+        recordOperationFuture(executeOperationAsync(
                 new WriteData(path, data, getRemoteTransactionVersion())));
     }
 
     @Override
     public Future<ActorSelection> readyTransaction() {
         LOG.debug("Tx {} readyTransaction called with {} previous recorded operations pending",
-                identifier, recordedOperationFutures.size());
+            getIdentifier(), recordedOperationCount());
 
         // Send the ReadyTransaction message to the Tx actor.
 
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/DatastoreInfoMXBean.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/DatastoreInfoMXBean.java
new file mode 100644 (file)
index 0000000..d393d64
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+
+/**
+ * JMX bean for general datastore info.
+ *
+ * @author Thomas Pantelis
+ */
+public interface DatastoreInfoMXBean {
+    double getTransactionCreationRateLimit();
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/DatastoreInfoMXBeanImpl.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/jmx/mbeans/DatastoreInfoMXBeanImpl.java
new file mode 100644 (file)
index 0000000..7976344
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2015 Brocade Communications Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.jmx.mbeans;
+
+import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
+import org.opendaylight.controller.md.sal.common.util.jmx.AbstractMXBean;
+
+/**
+ * Implementation of DatastoreInfoMXBean.
+ *
+ * @author Thomas Pantelis
+ */
+public class DatastoreInfoMXBeanImpl extends AbstractMXBean implements DatastoreInfoMXBean {
+
+    private final ActorContext actorContext;
+
+    public DatastoreInfoMXBeanImpl(String mxBeanType, ActorContext actorContext) {
+        super("GeneralRuntimeInfo", mxBeanType, null);
+        this.actorContext = actorContext;
+    }
+
+
+    @Override
+    public double getTransactionCreationRateLimit() {
+        return actorContext.getTxCreationLimit();
+    }
+}
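A short sketch of reading the new attribute over standard JMX. The ObjectName below is an assumption for illustration only; the real name is composed by AbstractMXBean from the "GeneralRuntimeInfo" bean name and the mxBeanType passed to the constructor.

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public final class RateLimitJmxReader {
        public static void main(String[] args) throws Exception {
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            // Hypothetical ObjectName; check the actual registration in a running controller.
            ObjectName name = new ObjectName(
                    "org.opendaylight.controller:type=DistributedConfigDatastore,name=GeneralRuntimeInfo");
            Object limit = server.getAttribute(name, "TransactionCreationRateLimit");
            System.out.println("transaction creation rate limit = " + limit);
        }
    }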
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeChangeListenerRegistration.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeChangeListenerRegistration.java
new file mode 100644 (file)
index 0000000..032f4c1
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+
+public final class CloseDataTreeChangeListenerRegistration implements Serializable {
+    private static final long serialVersionUID = 1L;
+    private static final CloseDataTreeChangeListenerRegistration INSTANCE = new CloseDataTreeChangeListenerRegistration();
+
+    private CloseDataTreeChangeListenerRegistration() {
+    }
+
+    public static CloseDataTreeChangeListenerRegistration getInstance() {
+        return INSTANCE;
+    }
+
+    private Object readResolve() throws ObjectStreamException {
+        return INSTANCE;
+    }
+}
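The private constructor plus readResolve() is the standard serializable-singleton idiom: deserializing the message yields the shared INSTANCE rather than a new object. A minimal sketch of the round trip (illustrative only):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import org.opendaylight.controller.cluster.datastore.messages.CloseDataTreeChangeListenerRegistration;

    public final class SingletonRoundTrip {
        public static void main(String[] args) throws Exception {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
                out.writeObject(CloseDataTreeChangeListenerRegistration.getInstance());
            }
            Object copy = new ObjectInputStream(
                    new ByteArrayInputStream(bytes.toByteArray())).readObject();
            // readResolve() substitutes the shared instance for the deserialized copy
            System.out.println(copy == CloseDataTreeChangeListenerRegistration.getInstance()); // true
        }
    }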
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeChangeListenerRegistrationReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/CloseDataTreeChangeListenerRegistrationReply.java
new file mode 100644 (file)
index 0000000..9d83fac
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+
+public final class CloseDataTreeChangeListenerRegistrationReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+    private static final CloseDataTreeChangeListenerRegistrationReply INSTANCE = new CloseDataTreeChangeListenerRegistrationReply();
+
+    private CloseDataTreeChangeListenerRegistrationReply() {
+        // Use getInstance() instead
+    }
+
+    public static CloseDataTreeChangeListenerRegistrationReply getInstance() {
+        return INSTANCE;
+    }
+
+    private Object readResolve() throws ObjectStreamException {
+        return INSTANCE;
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChanged.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChanged.java
new file mode 100644 (file)
index 0000000..919f944
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import com.google.common.base.Preconditions;
+import java.util.Collection;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
+
+/**
+ * Notification that a data tree has been changed. This message is intentionally
+ * not serializable: to propagate the change across cluster nodes, it needs to be
+ * intercepted by a local agent and forwarded as
+ * a {@link DataTreeDelta}.
+ */
+public final class DataTreeChanged {
+    private final Collection<DataTreeCandidate> changes;
+
+    public DataTreeChanged(final Collection<DataTreeCandidate> changes) {
+        this.changes = Preconditions.checkNotNull(changes);
+    }
+
+    /**
+     * Return the data changes.
+     *
+     * @return Change events
+     */
+    public Collection<DataTreeCandidate> getChanges() {
+        return changes;
+    }
+}
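A hedged sketch of how a listener-side actor might consume this message and acknowledge it with DataTreeChangedReply. It assumes the DOMDataTreeChangeListener callback accepts the candidate collection directly; this is not the actual listener actor added by this patch series.

    import akka.actor.UntypedActor;
    import org.opendaylight.controller.cluster.datastore.messages.DataTreeChanged;
    import org.opendaylight.controller.cluster.datastore.messages.DataTreeChangedReply;
    import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;

    public class ExampleDataTreeChangedHandler extends UntypedActor {
        private final DOMDataTreeChangeListener listener;

        public ExampleDataTreeChangedHandler(final DOMDataTreeChangeListener listener) {
            this.listener = listener;
        }

        @Override
        public void onReceive(final Object message) {
            if (message instanceof DataTreeChanged) {
                // Hand the DataTreeCandidates to the local listener and acknowledge the sender.
                listener.onDataTreeChanged(((DataTreeChanged) message).getChanges());
                getSender().tell(DataTreeChangedReply.getInstance(), getSelf());
            } else {
                unhandled(message);
            }
        }
    }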
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChangedReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/DataTreeChangedReply.java
new file mode 100644 (file)
index 0000000..e4a8d74
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+
+public final class DataTreeChangedReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+    private static final DataTreeChangedReply INSTANCE = new DataTreeChangedReply();
+
+    private DataTreeChangedReply() {
+        // Use getInstance() instead
+    }
+
+    public static DataTreeChangedReply getInstance() {
+        return INSTANCE;
+    }
+
+    private Object readResolve() throws ObjectStreamException {
+        return INSTANCE;
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RegisterDataTreeChangeListener.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RegisterDataTreeChangeListener.java
new file mode 100644 (file)
index 0000000..941336e
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import akka.actor.ActorRef;
+import com.google.common.base.Preconditions;
+import java.io.Externalizable;
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import org.opendaylight.controller.cluster.datastore.utils.SerializationUtils;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+
+/**
+ * Request a {@link org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener} registration be made on the shard
+ * leader.
+ */
+public final class RegisterDataTreeChangeListener implements Externalizable {
+    private static final long serialVersionUID = 1L;
+    private ActorRef dataTreeChangeListenerPath;
+    private YangInstanceIdentifier path;
+
+    // Required by the Externalizable contract: deserialization instantiates the class
+    // through its public no-argument constructor before readExternal() is invoked.
+    public RegisterDataTreeChangeListener() {
+    }
+
+    public RegisterDataTreeChangeListener(final YangInstanceIdentifier path, final ActorRef dataTreeChangeListenerPath) {
+        this.path = Preconditions.checkNotNull(path);
+        this.dataTreeChangeListenerPath = Preconditions.checkNotNull(dataTreeChangeListenerPath);
+    }
+
+    public YangInstanceIdentifier getPath() {
+        return path;
+    }
+
+    public ActorRef getDataTreeChangeListenerPath() {
+        return dataTreeChangeListenerPath;
+    }
+
+    @Override
+    public void writeExternal(final ObjectOutput out) throws IOException {
+        out.writeObject(dataTreeChangeListenerPath);
+        SerializationUtils.serializePath(path, out);
+    }
+
+    @Override
+    public void readExternal(final ObjectInput in) throws IOException, ClassNotFoundException {
+        dataTreeChangeListenerPath = (ActorRef) in.readObject();
+        path = SerializationUtils.deserializePath(in);
+    }
+}
diff --git a/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RegisterDataTreeChangeListenerReply.java b/opendaylight/md-sal/sal-distributed-datastore/src/main/java/org/opendaylight/controller/cluster/datastore/messages/RegisterDataTreeChangeListenerReply.java
new file mode 100644 (file)
index 0000000..88682ae
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.cluster.datastore.messages;
+
+import akka.actor.ActorRef;
+import com.google.common.base.Preconditions;
+import java.io.Serializable;
+
+/**
+ * Successful reply to a {@link RegisterDataTreeChangeListener} request.
+ */
+public final class RegisterDataTreeChangeListenerReply implements Serializable {
+    private static final long serialVersionUID = 1L;
+    private final ActorRef listenerRegistrationPath;
+
+    public RegisterDataTreeChangeListenerReply(final ActorRef listenerRegistrationPath) {
+        this.listenerRegistrationPath = Preconditions.checkNotNull(listenerRegistrationPath);
+    }
+
+    public ActorRef getListenerRegistrationPath() {
+        return listenerRegistrationPath;
+    }
+}
index b6250fc1cccbfbde152a8af7e1e2f4b9b3c6f7cd..f53368d8869416cb28340ab272aedcbfab449512 100644 (file)
@@ -20,7 +20,6 @@ import akka.dispatch.Mapper;
 import akka.dispatch.OnComplete;
 import akka.pattern.AskTimeoutException;
 import akka.util.Timeout;
-import com.codahale.metrics.JmxReporter;
 import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.Timer;
 import com.google.common.annotations.VisibleForTesting;
@@ -47,6 +46,7 @@ import org.opendaylight.controller.cluster.datastore.messages.LocalShardFound;
 import org.opendaylight.controller.cluster.datastore.messages.LocalShardNotFound;
 import org.opendaylight.controller.cluster.datastore.messages.PrimaryFound;
 import org.opendaylight.controller.cluster.datastore.messages.UpdateSchemaContext;
+import org.opendaylight.controller.cluster.reporting.MetricsReporter;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -64,10 +64,8 @@ import scala.concurrent.duration.FiniteDuration;
  */
 public class ActorContext {
     private static final Logger LOG = LoggerFactory.getLogger(ActorContext.class);
-    private static final String UNKNOWN_DATA_STORE_TYPE = "unknown";
     private static final String DISTRIBUTED_DATA_STORE_METRIC_REGISTRY = "distributed-data-store";
     private static final String METRIC_RATE = "rate";
-    private static final String DOMAIN = "org.opendaylight.controller.cluster.datastore";
     private static final Mapper<Throwable, Throwable> FIND_PRIMARY_FAILURE_TRANSFORMER =
                                                               new Mapper<Throwable, Throwable>() {
         @Override
@@ -94,8 +92,6 @@ public class ActorContext {
     private Timeout operationTimeout;
     private final String selfAddressHostPort;
     private RateLimiter txRateLimiter;
-    private final MetricRegistry metricRegistry = new MetricRegistry();
-    private final JmxReporter jmxReporter = JmxReporter.forRegistry(metricRegistry).inDomain(DOMAIN).build();
     private final int transactionOutstandingOperationLimit;
     private Timeout transactionCommitOperationTimeout;
     private Timeout shardInitializationTimeout;
@@ -104,6 +100,7 @@ public class ActorContext {
 
     private volatile SchemaContext schemaContext;
     private volatile boolean updated;
+    private final MetricRegistry metricRegistry = MetricsReporter.getInstance(DatastoreContext.METRICS_DOMAIN).getMetricsRegistry();
 
     public ActorContext(ActorSystem actorSystem, ActorRef shardManager,
             ClusterWrapper clusterWrapper, Configuration configuration) {
@@ -131,8 +128,6 @@ public class ActorContext {
         }
 
         transactionOutstandingOperationLimit = new CommonConfig(this.getActorSystem().settings().config()).getMailBoxCapacity();
-        jmxReporter.start();
-
     }
 
     private void setCachedProperties() {
@@ -482,7 +477,12 @@ public class ActorContext {
      * @return
      */
     public Timer getOperationTimer(String operationName){
-        final String rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, datastoreContext.getDataStoreType(), operationName, METRIC_RATE);
+        return getOperationTimer(datastoreContext.getDataStoreType(), operationName);
+    }
+
+    public Timer getOperationTimer(String dataStoreType, String operationName){
+        final String rate = MetricRegistry.name(DISTRIBUTED_DATA_STORE_METRIC_REGISTRY, dataStoreType,
+                operationName, METRIC_RATE);
         return metricRegistry.timer(rate);
     }
 
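For reference, a small sketch of how the new two-argument lookup is used with the Dropwizard Timer; the "config" data store type is an assumed value, and the resulting metric name is "distributed-data-store.config.commit.rate".

    import com.codahale.metrics.Timer;
    import org.opendaylight.controller.cluster.datastore.utils.ActorContext;

    final class TimedCommitExample {
        static void timeCommit(final ActorContext actorContext, final Runnable commitAction) {
            Timer commitTimer = actorContext.getOperationTimer("config", "commit");
            Timer.Context timing = commitTimer.time();
            try {
                commitAction.run();
            } finally {
                timing.stop(); // records the elapsed time in the shared MetricRegistry
            }
        }
    }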
index cc96d0d3b0d070623c737dc8f78340c45a20539f..e04c1a5d185ffbb5bc81b4d035a07545c02ef3a7 100644 (file)
@@ -16,9 +16,7 @@ import akka.actor.Props;
 import akka.dispatch.Dispatchers;
 import akka.dispatch.OnComplete;
 import akka.japi.Creator;
-import akka.japi.Procedure;
 import akka.pattern.Patterns;
-import akka.persistence.SnapshotSelectionCriteria;
 import akka.testkit.TestActorRef;
 import akka.util.Timeout;
 import com.google.common.base.Function;
@@ -41,6 +39,7 @@ import java.util.concurrent.atomic.AtomicReference;
 import org.junit.Test;
 import org.mockito.InOrder;
 import org.opendaylight.controller.cluster.DataPersistenceProvider;
+import org.opendaylight.controller.cluster.DelegatingPersistentDataProvider;
 import org.opendaylight.controller.cluster.datastore.identifiers.ShardIdentifier;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransaction;
 import org.opendaylight.controller.cluster.datastore.messages.AbortTransactionReply;
@@ -1391,44 +1390,20 @@ public class ShardTest extends AbstractShardTest {
     public void testCreateSnapshot(final boolean persistent, final String shardActorName) throws Exception{
 
         final AtomicReference<Object> savedSnapshot = new AtomicReference<>();
-        class DelegatingPersistentDataProvider implements DataPersistenceProvider {
-            DataPersistenceProvider delegate;
-
-            DelegatingPersistentDataProvider(DataPersistenceProvider delegate) {
-                this.delegate = delegate;
-            }
-
-            @Override
-            public boolean isRecoveryApplicable() {
-                return delegate.isRecoveryApplicable();
-            }
-
-            @Override
-            public <T> void persist(T o, Procedure<T> procedure) {
-                delegate.persist(o, procedure);
+        class TestPersistentDataProvider extends DelegatingPersistentDataProvider {
+            TestPersistentDataProvider(DataPersistenceProvider delegate) {
+                super(delegate);
             }
 
             @Override
             public void saveSnapshot(Object o) {
                 savedSnapshot.set(o);
-                delegate.saveSnapshot(o);
-            }
-
-            @Override
-            public void deleteSnapshots(SnapshotSelectionCriteria criteria) {
-                delegate.deleteSnapshots(criteria);
-            }
-
-            @Override
-            public void deleteMessages(long sequenceNumber) {
-                delegate.deleteMessages(sequenceNumber);
+                super.saveSnapshot(o);
             }
         }
 
         dataStoreContextBuilder.persistent(persistent);
 
-
-
         new ShardTestKit(getSystem()) {{
             final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));
 
@@ -1437,15 +1412,7 @@ public class ShardTest extends AbstractShardTest {
                 protected TestShard(ShardIdentifier name, Map<String, String> peerAddresses,
                                     DatastoreContext datastoreContext, SchemaContext schemaContext) {
                     super(name, peerAddresses, datastoreContext, schemaContext);
-                }
-
-                DelegatingPersistentDataProvider delegating;
-
-                protected DataPersistenceProvider persistence() {
-                    if(delegating == null) {
-                        delegating = new DelegatingPersistentDataProvider(super.persistence());
-                    }
-                    return delegating;
+                    setPersistence(new TestPersistentDataProvider(super.persistence()));
                 }
 
                 @Override
@@ -1560,14 +1527,14 @@ public class ShardTest extends AbstractShardTest {
             TestActorRef<Shard> shard1 = TestActorRef.create(getSystem(),
                     persistentProps, "testPersistence1");
 
-            assertTrue("Recovery Applicable", shard1.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+            assertTrue("Recovery Applicable", shard1.underlyingActor().persistence().isRecoveryApplicable());
 
             shard1.tell(PoisonPill.getInstance(), ActorRef.noSender());
 
             TestActorRef<Shard> shard2 = TestActorRef.create(getSystem(),
                     nonPersistentProps, "testPersistence2");
 
-            assertFalse("Recovery Not Applicable", shard2.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+            assertFalse("Recovery Not Applicable", shard2.underlyingActor().persistence().isRecoveryApplicable());
 
             shard2.tell(PoisonPill.getInstance(), ActorRef.noSender());
 
@@ -1583,19 +1550,19 @@ public class ShardTest extends AbstractShardTest {
             TestActorRef<Shard> shard = TestActorRef.create(getSystem(), newShardProps(), "testOnDatastoreContext");
 
             assertEquals("isRecoveryApplicable", true,
-                    shard.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+                    shard.underlyingActor().persistence().isRecoveryApplicable());
 
             waitUntilLeader(shard);
 
             shard.tell(dataStoreContextBuilder.persistent(false).build(), ActorRef.noSender());
 
             assertEquals("isRecoveryApplicable", false,
-                    shard.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+                    shard.underlyingActor().persistence().isRecoveryApplicable());
 
             shard.tell(dataStoreContextBuilder.persistent(true).build(), ActorRef.noSender());
 
             assertEquals("isRecoveryApplicable", true,
-                    shard.underlyingActor().getDataPersistenceProvider().isRecoveryApplicable());
+                    shard.underlyingActor().persistence().isRecoveryApplicable());
 
             shard.tell(PoisonPill.getInstance(), ActorRef.noSender());
         }};
index 265ec59f1cd324889627281850ffae9ed456c52a..a2471001864c1571a8a52d4abf0b8f41e7571ea3 100644 (file)
@@ -10,7 +10,6 @@ import static org.mockito.Matchers.eq;
 import static org.mockito.Matchers.isA;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.READ_ONLY;
 import static org.opendaylight.controller.cluster.datastore.TransactionProxy.TransactionType.READ_WRITE;
@@ -164,34 +163,6 @@ public class TransactionProxyTest extends AbstractTransactionProxyTest {
         testReadWithExceptionOnInitialCreateTransaction(new TestException());
     }
 
-    @Test(expected = TestException.class)
-    public void testReadWithPriorRecordingOperationFailure() throws Throwable {
-        doReturn(dataStoreContextBuilder.shardBatchedModificationCount(2).build()).
-                when(mockActorContext).getDatastoreContext();
-
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectFailedBatchedModifications(actorRef);
-
-        doReturn(readSerializedDataReply(null)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqSerializedReadData());
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockActorContext, READ_WRITE);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        transactionProxy.delete(TestModel.TEST_PATH);
-
-        try {
-            propagateReadFailedExceptionCause(transactionProxy.read(TestModel.TEST_PATH));
-        } finally {
-            verify(mockActorContext, times(0)).executeOperationAsync(
-                    eq(actorSelection(actorRef)), eqSerializedReadData());
-        }
-    }
-
     @Test
     public void testReadWithPriorRecordingOperationSuccessful() throws Throwable {
         ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
@@ -301,35 +272,6 @@ public class TransactionProxyTest extends AbstractTransactionProxyTest {
         propagateReadFailedExceptionCause(transactionProxy.exists(TestModel.TEST_PATH));
     }
 
-    @Test(expected = TestException.class)
-    public void testExistsWithPriorRecordingOperationFailure() throws Throwable {
-        doReturn(dataStoreContextBuilder.shardBatchedModificationCount(2).build()).
-                when(mockActorContext).getDatastoreContext();
-
-        ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
-
-        NormalizedNode<?, ?> nodeToWrite = ImmutableNodes.containerNode(TestModel.TEST_QNAME);
-
-        expectFailedBatchedModifications(actorRef);
-
-        doReturn(dataExistsSerializedReply(false)).when(mockActorContext).executeOperationAsync(
-                eq(actorSelection(actorRef)), eqSerializedDataExists());
-
-        TransactionProxy transactionProxy = new TransactionProxy(mockActorContext,
-                READ_WRITE);
-
-        transactionProxy.write(TestModel.TEST_PATH, nodeToWrite);
-
-        transactionProxy.delete(TestModel.TEST_PATH);
-
-        try {
-            propagateReadFailedExceptionCause(transactionProxy.exists(TestModel.TEST_PATH));
-        } finally {
-            verify(mockActorContext, times(0)).executeOperationAsync(
-                    eq(actorSelection(actorRef)), eqSerializedDataExists());
-        }
-    }
-
     @Test
     public void testExistsWithPriorRecordingOperationSuccessful() throws Throwable {
         ActorRef actorRef = setupActorContextWithInitialCreateTransaction(getSystem(), READ_WRITE);
index f97e32547261727f842c087e1e53ef502d1d40ea..69a023ad3158ebbbe0e1fd04c924d0358851a0d9 100644 (file)
@@ -23,6 +23,7 @@ import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Matchers;
 import org.mockito.Mock;
+import org.mockito.Mockito;
 import org.mockito.MockitoAnnotations;
 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 
@@ -170,12 +171,48 @@ public class TransactionRateLimitingCommitCallbackTest {
 
     }
 
+    @Test
+    public void testAdjustRateLimitForUnusedTransaction() {
+        doReturn(commitTimer).when(actorContext).getOperationTimer("one", "commit");
+        doReturn("one").when(actorContext).getDataStoreType();
+
+        Timer commitTimer2 = Mockito.mock(Timer.class);
+        Snapshot commitSnapshot2 = Mockito.mock(Snapshot.class);
+
+        doReturn(commitSnapshot2).when(commitTimer2).getSnapshot();
+
+        doReturn(commitTimer2).when(actorContext).getOperationTimer("two", "commit");
+
+        DatastoreContext.newBuilder().dataStoreType("one").build();
+        DatastoreContext.newBuilder().dataStoreType("two").build();
+
+        doReturn(TimeUnit.MICROSECONDS.toNanos(500) * 1D).when(commitSnapshot).getValue(1 * 0.1);
+
+        TransactionRateLimitingCallback.adjustRateLimitForUnusedTransaction(actorContext);
+
+        verify(actorContext, never()).setTxCreationLimit(anyDouble());
+
+        Mockito.reset(commitSnapshot);
+
+        TransactionRateLimitingCallback.adjustRateLimitForUnusedTransaction(actorContext);
+
+        verify(actorContext, never()).setTxCreationLimit(anyDouble());
+
+        doReturn(TimeUnit.MICROSECONDS.toNanos(100) * 1D).when(commitSnapshot2).getValue(1 * 0.1);
+
+        TransactionRateLimitingCallback.adjustRateLimitForUnusedTransaction(actorContext);
+
+        verify(actorContext).setTxCreationLimit(Matchers.doubleThat(approximately(1000)));
+    }
+
     public Matcher<Double> approximately(final double val){
         return new BaseMatcher<Double>() {
             @Override
             public boolean matches(Object o) {
                 Double aDouble = (Double) o;
-                return aDouble > val && aDouble < val+1;
+                return aDouble >= val && aDouble <= val+1;
             }
 
             @Override
index f404c0637f4cfe5ac49fb4fa5001ad1c71aa563d..8b3a8308c4a34a70c7abb3ca8652a948060aa0cd 100644 (file)
@@ -6,6 +6,7 @@
  */
 package org.opendaylight.controller.md.sal.dom.api;
 
+import com.google.common.base.MoreObjects;
 import com.google.common.base.Preconditions;
 import java.io.Serializable;
 import java.util.Iterator;
@@ -102,4 +103,9 @@ public final class DOMDataTreeIdentifier implements Immutable, Path<DOMDataTreeI
 
         return oi.hasNext() ? -1 : 0;
     }
+
+    @Override
+    public String toString() {
+        return MoreObjects.toStringHelper(this).add("datastore", datastoreType).add("root", rootIdentifier).toString();
+    }
 }
index b9972fc0a09f9773db0270e51198209438df010e..e814dd254153d6ae8c32e38f7a6745a4e79d7e2e 100644 (file)
@@ -17,7 +17,7 @@ import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import com.lmax.disruptor.EventHandler;
 import com.lmax.disruptor.InsufficientCapacityException;
-import com.lmax.disruptor.SleepingWaitStrategy;
+import com.lmax.disruptor.PhasedBackoffWaitStrategy;
 import com.lmax.disruptor.WaitStrategy;
 import com.lmax.disruptor.dsl.Disruptor;
 import com.lmax.disruptor.dsl.ProducerType;
@@ -50,7 +50,7 @@ import org.opendaylight.yangtools.yang.model.api.SchemaPath;
  */
 public final class DOMNotificationRouter implements AutoCloseable, DOMNotificationPublishService, DOMNotificationService {
     private static final ListenableFuture<Void> NO_LISTENERS = Futures.immediateFuture(null);
-    private static final WaitStrategy DEFAULT_STRATEGY = new SleepingWaitStrategy();
+    private static final WaitStrategy DEFAULT_STRATEGY = PhasedBackoffWaitStrategy.withLock(1L, 30L, TimeUnit.MILLISECONDS);
     private static final EventHandler<DOMNotificationRouterEvent> DISPATCH_NOTIFICATIONS = new EventHandler<DOMNotificationRouterEvent>() {
         @Override
         public void onEvent(final DOMNotificationRouterEvent event, final long sequence, final boolean endOfBatch) throws Exception {
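The wait-strategy switch above presumably trades a little wake-up latency for lower idle CPU: consumers first spin, then yield, then fall back to a blocking lock instead of SleepingWaitStrategy's repeated short parks. A minimal sketch of constructing the same strategy, with the values taken from the change above:

    import com.lmax.disruptor.PhasedBackoffWaitStrategy;
    import com.lmax.disruptor.WaitStrategy;
    import java.util.concurrent.TimeUnit;

    final class WaitStrategyExample {
        // spin for up to 1 ms, yield for up to 30 ms, then block on a lock until notified
        static final WaitStrategy STRATEGY =
                PhasedBackoffWaitStrategy.withLock(1L, 30L, TimeUnit.MILLISECONDS);
    }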
@@ -5,38 +5,52 @@
  * terms of the Eclipse Public License v1.0 which accompanies this distribution,
  * and is available at http://www.eclipse.org/legal/epl-v10.html
  */
-package org.opendaylight.controller.md.sal.dom.store.impl;
+package org.opendaylight.controller.sal.core.spi.data;
 
+import com.google.common.annotations.Beta;
 import com.google.common.base.MoreObjects;
 import com.google.common.base.MoreObjects.ToStringHelper;
 import com.google.common.base.Preconditions;
-import org.opendaylight.controller.sal.core.spi.data.DOMStoreTransaction;
-import org.slf4j.Logger;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
 
 /**
- * Abstract DOM Store Transaction
+ * Abstract DOM Store Transaction.
  *
  * Convenience super implementation of DOM Store transaction which provides
  * common implementation of {@link #toString()} and {@link #getIdentifier()}.
+ *
+ * It can optionally capture the context where it was allocated.
+ *
+ * @param <T> identifier type
  */
-abstract class AbstractDOMStoreTransaction implements DOMStoreTransaction {
+@Beta
+public abstract class AbstractDOMStoreTransaction<T> implements DOMStoreTransaction {
     private final Throwable debugContext;
-    private final Object identifier;
+    private final T identifier;
+
+    protected AbstractDOMStoreTransaction(@Nonnull final T identifier) {
+        this(identifier, false);
+    }
 
-    protected AbstractDOMStoreTransaction(final Object identifier, final boolean debug) {
+    protected AbstractDOMStoreTransaction(@Nonnull final T identifier, final boolean debug) {
         this.identifier = Preconditions.checkNotNull(identifier, "Identifier must not be null.");
         this.debugContext = debug ? new Throwable().fillInStackTrace() : null;
     }
 
     @Override
-    public final Object getIdentifier() {
+    public final T getIdentifier() {
         return identifier;
     }
 
-    protected final void warnDebugContext(final Logger logger) {
-        if (debugContext != null) {
-            logger.warn("Transaction {} has been allocated in the following context", identifier, debugContext);
-        }
+    /**
+     * Return the context in which this transaction was allocated.
+     *
+     * @return The context in which this transaction was allocated, or null
+     *         if the context was not recorded.
+     */
+    @Nullable public final Throwable getDebugContext() {
+        return debugContext;
     }
 
     @Override
@@ -51,7 +65,7 @@ abstract class AbstractDOMStoreTransaction implements DOMStoreTransaction {
      *            ToStringHelper instance
      * @return ToStringHelper instance which was passed in
      */
-    protected ToStringHelper addToStringAttributes(final ToStringHelper toStringHelper) {
+    protected ToStringHelper addToStringAttributes(@Nonnull final ToStringHelper toStringHelper) {
         return toStringHelper.add("id", identifier);
     }
-}
\ No newline at end of file
+}
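A minimal sketch of a subclass using the now-generic identifier and the optional allocation-context capture. The class is hypothetical and assumes DOMStoreTransaction leaves only close() to implement.

    import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;

    final class ExampleTransaction extends AbstractDOMStoreTransaction<String> {
        ExampleTransaction(final String id, final boolean debug) {
            super(id, debug); // debug=true records a Throwable capturing the allocation site
        }

        @Override
        public void close() {
            // An implementation could log getDebugContext() here, e.g. when a transaction
            // is found to have been left open or closed abnormally.
        }
    }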
diff --git a/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/ForwardingDOMStoreThreePhaseCommitCohort.java b/opendaylight/md-sal/sal-dom-spi/src/main/java/org/opendaylight/controller/sal/core/spi/data/ForwardingDOMStoreThreePhaseCommitCohort.java
new file mode 100644 (file)
index 0000000..4c817dd
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+package org.opendaylight.controller.sal.core.spi.data;
+
+import com.google.common.annotations.Beta;
+import com.google.common.collect.ForwardingObject;
+import com.google.common.util.concurrent.ListenableFuture;
+
+/**
+ * Abstract base class for {@link DOMStoreThreePhaseCommitCohort} implementations,
+ * which forward most of their functionality to a backend {@link #delegate()}.
+ */
+@Beta
+public abstract class ForwardingDOMStoreThreePhaseCommitCohort extends ForwardingObject implements DOMStoreThreePhaseCommitCohort {
+    @Override
+    protected abstract DOMStoreThreePhaseCommitCohort delegate();
+
+    @Override
+    public ListenableFuture<Boolean> canCommit() {
+        return delegate().canCommit();
+    }
+
+    @Override
+    public ListenableFuture<Void> preCommit() {
+        return delegate().preCommit();
+    }
+
+    @Override
+    public ListenableFuture<Void> abort() {
+        return delegate().abort();
+    }
+
+    @Override
+    public ListenableFuture<Void> commit() {
+        return delegate().commit();
+    }
+}
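A hypothetical decorator built on the new base class: it forwards everything to the backend cohort and only adds logging around commit(). A minimal sketch:

    import com.google.common.base.Preconditions;
    import com.google.common.util.concurrent.ListenableFuture;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
    import org.opendaylight.controller.sal.core.spi.data.ForwardingDOMStoreThreePhaseCommitCohort;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class LoggingCommitCohort extends ForwardingDOMStoreThreePhaseCommitCohort {
        private static final Logger LOG = LoggerFactory.getLogger(LoggingCommitCohort.class);
        private final DOMStoreThreePhaseCommitCohort delegate;

        LoggingCommitCohort(final DOMStoreThreePhaseCommitCohort delegate) {
            this.delegate = Preconditions.checkNotNull(delegate);
        }

        @Override
        protected DOMStoreThreePhaseCommitCohort delegate() {
            return delegate;
        }

        @Override
        public ListenableFuture<Void> commit() {
            LOG.debug("commit() invoked on {}", delegate);
            return super.commit();
        }
    }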
index d7d547d19e847303d88f94a7e75d80149c5a4b13..1b9a37df66ae351415a517064bb1b86a99b46e04 100644 (file)
@@ -8,8 +8,8 @@
                     </type>
                     <name>XSQL</name>
                     <data-broker>
-                       <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
-                       <name>binding-data-broker</name>
+                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-async-data-broker</type>
+                        <name>binding-data-broker</name>
                     </data-broker>
                     <async-data-broker>
                         <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
index cc92b48a157abc00a9baf5c80b8b37aa034fcf30..2cb2e7bfb5482250c52432780424e7766d979da0 100644 (file)
@@ -1,3 +1,10 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
 package org.odl.xsql;
 
 import java.sql.Connection;
@@ -10,7 +17,9 @@ import java.util.Properties;
 import java.util.logging.Logger;
 
 import org.opendaylight.controller.md.sal.dom.xsql.jdbc.JDBCConnection;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
 public class JDBCDriver implements Driver {
 
     public static JDBCDriver drv = new JDBCDriver();
index 2f280527580a3eecabe62b4d674ae46ec586ff70..938d25ec5042dcff9092b392e5c7e57292e41c26 100644 (file)
@@ -1,3 +1,10 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
 package org.opendaylight.controller.md.sal.dom.xsql;
 
 import java.io.InputStream;
@@ -21,7 +28,9 @@ import java.sql.Time;
 import java.sql.Timestamp;
 import java.util.Calendar;
 import java.util.Map;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
 public class TablesResultSet implements ResultSet {
 
     private String tables[] = null;
index a5658ccc9ed661b9b2d616f2ca70a2303e875bb0..05f65225eaec15755b520bf1f00e44038416e849 100644 (file)
@@ -1,3 +1,10 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
 package org.opendaylight.controller.md.sal.dom.xsql;
 
 import java.io.File;
@@ -24,7 +31,9 @@ import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.model.api.Module;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
 public class XSQLAdapter extends Thread implements SchemaContextListener {
 
     private static final int SLEEP = 10000;
@@ -51,6 +60,7 @@ public class XSQLAdapter extends Thread implements SchemaContextListener {
     private String pinningFile;
     private ServerSocket serverSocket = null;
     private DOMDataBroker domDataBroker = null;
+    private static final String REFERENCE_FIELD_NAME = "reference";
 
     private XSQLAdapter() {
         XSQLAdapter.log("Starting Adapter");
@@ -152,28 +162,18 @@ public class XSQLAdapter extends Thread implements SchemaContextListener {
                 List<Object> result = new LinkedList<Object>();
                 YangInstanceIdentifier instanceIdentifier = YangInstanceIdentifier
                         .builder()
-                        .node(XSQLODLUtils.getPath(table.getODLNode()).get(0))
+                        .node(XSQLODLUtils.getPath(table.getFirstFromSchemaNodes()).get(0))
                         .toInstance();
                 DOMDataReadTransaction t = this.domDataBroker
                         .newReadOnlyTransaction();
                 Object node = t.read(type,
                         instanceIdentifier).get();
 
-                node = XSQLODLUtils.get(node, "reference");
+                node = XSQLODLUtils.get(node, REFERENCE_FIELD_NAME);
                 if (node == null) {
                     return result;
                 }
-
-                Map<?, ?> children = XSQLODLUtils.getChildren(node);
-                for (Object c : children.values()) {
-                    result.add(c);
-                    /* I don't remember why i did this... possibly to prevent different siblings queried together
-                    Map<?, ?> sons = XSQLODLUtils.getChildren(c);
-                    for (Object child : sons.values()) {
-                        result.add(child);
-                    }*/
-                }
-
+                result.add(node);
                 return result;
             } catch (Exception err) {
                 XSQLAdapter.log(err);
index a9c0f69fc6e3e8e58166fff6373327a8b91b82e7..76152966d06e8448f0056ed363ec564d761295ec 100644 (file)
@@ -1,3 +1,10 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
 package org.opendaylight.controller.md.sal.dom.xsql;
 
 import java.io.DataInputStream;
@@ -23,7 +30,9 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
 public class XSQLBluePrint implements DatabaseMetaData, Serializable {
 
     private static final long serialVersionUID = 1L;
@@ -203,15 +212,23 @@ public class XSQLBluePrint implements DatabaseMetaData, Serializable {
         return result;
     }
 
-    public void addToBluePrintCache(XSQLBluePrintNode blNode) {
-        this.tableNameToBluePrint.put(blNode.getBluePrintNodeName(), blNode);
-        Map<String, XSQLBluePrintNode> map = this.odlNameToBluePrint.get(blNode
-                .getODLTableName());
-        if (map == null) {
-            map = new HashMap<String, XSQLBluePrintNode>();
-            this.odlNameToBluePrint.put(blNode.getODLTableName(), map);
+    public XSQLBluePrintNode addToBluePrintCache(XSQLBluePrintNode blNode, XSQLBluePrintNode parent) {
+        XSQLBluePrintNode existingNode = this.tableNameToBluePrint.get(blNode.getBluePrintNodeName());
+        if (existingNode != null) {
+            existingNode.mergeAugmentation(blNode);
+            return existingNode;
+        } else {
+            this.tableNameToBluePrint.put(blNode.getBluePrintNodeName(), blNode);
+            Map<String, XSQLBluePrintNode> map = this.odlNameToBluePrint.get(blNode.getODLTableName());
+            if (map == null) {
+                map = new HashMap<String, XSQLBluePrintNode>();
+                this.odlNameToBluePrint.put(blNode.getODLTableName(), map);
+            }
+            map.put(blNode.getBluePrintNodeName(), blNode);
+            if (parent != null) {
+                parent.addChild(blNode);
+            }
+            return blNode;
         }
-        map.put(blNode.getBluePrintNodeName(), blNode);
     }
 
     public Class<?> getGenericType(ParameterizedType type) {
index 4a565452388648b14719df92e97ec5e412c3d416..d3cd91a6bd656bb5d92078092d9544bb4ae19eb6 100644 (file)
@@ -1,3 +1,10 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
 package org.opendaylight.controller.md.sal.dom.xsql;
 
 import java.io.Serializable;
@@ -8,6 +15,9 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
 public class XSQLBluePrintNode implements Serializable {
 
     private static final long serialVersionUID = 1L;
@@ -24,12 +34,25 @@ public class XSQLBluePrintNode implements Serializable {
     private Set<XSQLColumn> columns = new HashSet<XSQLColumn>();
     private Map<String, XSQLColumn> origNameToColumn = new HashMap<String, XSQLColumn>();
 
-    private transient Object odlNode = null;
+    private transient Object[] odlSchemaNodes = null;
     private boolean module = false;
     private String bluePrintTableName = null;
     private String odlTableName = null;
     private String origName = null;
 
+    public void mergeAugmentation(XSQLBluePrintNode aug) {
+        this.relations.addAll(aug.relations);
+        this.inheritingNodes.addAll(aug.inheritingNodes);
+        this.children.addAll(aug.children);
+        this.columns.addAll(aug.columns);
+        this.origNameToColumn.putAll(aug.origNameToColumn);
+        if (aug.odlSchemaNodes != null) {
+            for (Object sn : aug.odlSchemaNodes) {
+                addToSchemaNodes(sn);
+            }
+        }
+    }
+
     public XSQLBluePrintNode(String name, String _origName, int _level) {
         this.level = _level;
         this.odlTableName = name;
@@ -46,12 +69,32 @@ public class XSQLBluePrintNode implements Serializable {
 
     public XSQLBluePrintNode(Object _odlNode, int _level,
             XSQLBluePrintNode _parent) {
-        this.odlNode = _odlNode;
+        addToSchemaNodes(_odlNode);
         this.level = _level;
         this.module = XSQLODLUtils.isModule(_odlNode);
         this.parent = _parent;
         this.bluePrintTableName = XSQLODLUtils.getBluePrintName(_odlNode);
-        this.odlTableName = XSQLODLUtils.getODLNodeName(this.odlNode);
+        this.odlTableName = XSQLODLUtils
+                .getODLNodeName(getFirstFromSchemaNodes());
+    }
+
+    private void addToSchemaNodes(Object schemaObject) {
+        if (this.odlSchemaNodes == null)
+            this.odlSchemaNodes = new Object[1];
+        else {
+            Object[] temp = new Object[this.odlSchemaNodes.length + 1];
+            System.arraycopy(this.odlSchemaNodes, 0, temp, 0,
+                    this.odlSchemaNodes.length);
+            this.odlSchemaNodes = temp;
+        }
+        this.odlSchemaNodes[this.odlSchemaNodes.length - 1] = schemaObject;
+    }
+
+    public Object getFirstFromSchemaNodes() {
+        if (this.odlSchemaNodes == null) {
+            return null;
+        }
+        return this.odlSchemaNodes[0];
     }
 
     public String getOrigName() {
@@ -72,16 +115,13 @@ public class XSQLBluePrintNode implements Serializable {
 
     public String getODLTableName() {
         if (this.odlTableName == null) {
-            this.odlTableName = XSQLODLUtils.getODLNodeName(this.odlNode);
+            this.odlTableName = XSQLODLUtils
+                    .getODLNodeName(getFirstFromSchemaNodes());
         }
         return this.odlTableName;
     }
 
-    public Object getODLNode() {
-        return this.odlNode;
-    }
-
-    public void AddChild(XSQLBluePrintNode ch) {
+    public void addChild(XSQLBluePrintNode ch) {
         this.children.add(ch);
     }
 
@@ -218,7 +258,7 @@ public class XSQLBluePrintNode implements Serializable {
         if (myInterfaceName != null) {
             return myInterfaceName;
         }
-        if (odlNode != null) {
+        if (this.odlSchemaNodes != null) {
             return getBluePrintNodeName();
         }
         if (odlTableName != null) {
@@ -238,15 +278,14 @@ public class XSQLBluePrintNode implements Serializable {
     @Override
     public boolean equals(Object obj) {
         XSQLBluePrintNode other = (XSQLBluePrintNode) obj;
-        if (odlNode != null) {
+        if (this.odlSchemaNodes != null) {
             return getBluePrintNodeName().equals(other.getBluePrintNodeName());
         } else if (this.odlTableName == null && other.odlTableName != null) {
             return false;
         }
         if (this.odlTableName != null && other.odlTableName == null) {
             return false;
-        }
-        else {
+        } else {
             return this.odlTableName.equals(other.odlTableName);
         }
     }
@@ -255,7 +294,7 @@ public class XSQLBluePrintNode implements Serializable {
     public int hashCode() {
         if (myInterfaceString != null) {
             return myInterfaceString.hashCode();
-        } else if (odlNode != null) {
+        } else if (this.odlSchemaNodes != null) {
             return bluePrintTableName.hashCode();
         }
         return 0;
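
The hunks above replace the single odlNode field with an odlSchemaNodes array that addToSchemaNodes() grows by one slot per call, while getFirstFromSchemaNodes() only ever reads element zero. A minimal sketch of the same accumulate-and-read-first behaviour, using a List instead of manual array copies; the class and field names below are illustrative only and not part of the patch:

    import java.util.ArrayList;
    import java.util.List;

    // Illustrative equivalent of addToSchemaNodes()/getFirstFromSchemaNodes():
    // a List absorbs the manual System.arraycopy growth done in the patch.
    class SchemaNodeHolder {
        private final List<Object> schemaNodes = new ArrayList<>();

        void addToSchemaNodes(Object schemaObject) {
            schemaNodes.add(schemaObject);
        }

        Object getFirstFromSchemaNodes() {
            // Mirrors the null check in the patched class: nothing added yet -> null.
            return schemaNodes.isEmpty() ? null : schemaNodes.get(0);
        }
    }
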
index 17b8ae5f291e7a12f425dd20a96010ddb165af20..16a33b380bb4e54a09ee6e70025e45e8ab1de263 100644 (file)
@@ -1,15 +1,27 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
 package org.opendaylight.controller.md.sal.dom.xsql;
 
 import java.lang.reflect.Field;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 
 import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.model.api.AugmentationSchema;
+import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
 import org.opendaylight.yangtools.yang.model.api.DataNodeContainer;
 import org.opendaylight.yangtools.yang.model.api.DataSchemaNode;
 import org.opendaylight.yangtools.yang.model.api.ListSchemaNode;
@@ -20,7 +32,9 @@ import org.opendaylight.yangtools.yang.model.util.Uint16;
 import org.opendaylight.yangtools.yang.model.util.Uint32;
 import org.opendaylight.yangtools.yang.model.util.Uint64;
 import org.opendaylight.yangtools.yang.model.util.Uint8;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
 public class XSQLODLUtils {
 
     private static Map<Class<?>, Class<?>> types =
@@ -113,7 +127,7 @@ public class XSQLODLUtils {
 
     public static boolean createOpenDaylightCache(XSQLBluePrint bluePrint,Object module) {
         XSQLBluePrintNode node = new XSQLBluePrintNode(module, 0,null);
-        bluePrint.addToBluePrintCache(node);
+        bluePrint.addToBluePrintCache(node,null);
         collectODL(bluePrint, node, ((Module) module).getChildNodes(), 1);
         return true;
     }
@@ -124,20 +138,30 @@ public class XSQLODLUtils {
             return;
         }
         for (DataSchemaNode n : nodes) {
-            if (n instanceof DataNodeContainer /*|| n instanceof LeafListSchemaNode*/
-                || n instanceof ListSchemaNode) {
+            if (n instanceof DataNodeContainer) {
                 XSQLBluePrintNode bn = new XSQLBluePrintNode(n, level,parent);
-                bluePrint.addToBluePrintCache(bn);
-                parent.AddChild(bn);
-                if (n instanceof DataNodeContainer) {
+                bn = bluePrint.addToBluePrintCache(bn,parent);
+                if (n instanceof ListSchemaNode) {
                     level++;
-                    collectODL(bluePrint, bn,
-                        ((DataNodeContainer) n).getChildNodes(), level);
+                    collectODL(bluePrint, bn,((ListSchemaNode) n).getChildNodes(), level);
+                    Set<AugmentationSchema> s = ((ListSchemaNode)n).getAvailableAugmentations();
+                    if(s!=null){
+                        for(AugmentationSchema as:s){
+                            collectODL(bluePrint, bn,as.getChildNodes(), level);
+                        }
+                    }
                     level--;
-                } else if (n instanceof ListSchemaNode) {
+                }else{
                     level++;
-                    collectODL(bluePrint, bn,
-                        ((ListSchemaNode) n).getChildNodes(), level);
+                    collectODL(bluePrint, bn,((DataNodeContainer) n).getChildNodes(), level);
+                    if(n instanceof ContainerSchemaNode){
+                       Set<AugmentationSchema> s = ((ContainerSchemaNode)n).getAvailableAugmentations();
+                       if(s!=null){
+                           for(AugmentationSchema as:s){
+                               collectODL(bluePrint, bn,as.getChildNodes(), level);
+                           }
+                       }
+                    }
                     level--;
                 }
             } else {
@@ -189,7 +213,7 @@ public class XSQLODLUtils {
             Field f = findField(c, name);
             return f.get(o);
         } catch (Exception err) {
-            XSQLAdapter.log(err);
+            //XSQLAdapter.log(err);
         }
         return null;
     }
@@ -207,6 +231,21 @@ public class XSQLODLUtils {
         return (Map<?, ?>) get(o, "children");
     }
 
+    public static Collection<?> getChildrenCollection(Object o) {
+        Object value = get(o, "children");
+        if(value==null)
+            return Collections.emptyList();
+        if(value instanceof Map)
+            return ((Map<?,?>)value).values();
+        else
+        if(value instanceof Collection){
+            return (Collection<?>)value;
+        }else{
+            XSQLAdapter.log("Unknown Child Value Type="+value.getClass().getName());
+            return new ArrayList<>();
+        }
+    }
+
     public static Object getValue(Object o) {
         return get(o, "value");
     }
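
The new getChildrenCollection() helper reflects on a node's "children" field and normalises whatever it finds into a Collection, since NormalizedNode implementations hold children either as a Map or as a Collection. A standalone sketch of that normalisation, with the class and method names invented for illustration:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.Collections;
    import java.util.Map;

    // Illustrative normalisation: surface any "children" holder as a Collection.
    final class ChildAccess {
        static Collection<?> asChildCollection(Object value) {
            if (value == null) {
                return Collections.emptyList();      // no children at all
            }
            if (value instanceof Map) {
                return ((Map<?, ?>) value).values(); // keyed children, e.g. map entry nodes
            }
            if (value instanceof Collection) {
                return (Collection<?>) value;        // already a collection
            }
            return new ArrayList<>();                // unknown layout, treated as empty
        }
    }
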
index 6689908204e19e9aced43dccbb28d2dd5a8f2c07..ea16e72dc91e1a438229e3e2e94f3e78c3eee09a 100644 (file)
@@ -1,10 +1,16 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
 package org.opendaylight.controller.md.sal.dom.xsql.jdbc;
 
 import java.io.InputStream;
 import java.io.Reader;
 import java.io.Serializable;
 import java.lang.reflect.Method;
-import java.lang.reflect.Proxy;
 import java.math.BigDecimal;
 import java.net.URL;
 import java.sql.Array;
@@ -24,6 +30,7 @@ import java.sql.Time;
 import java.sql.Timestamp;
 import java.util.ArrayList;
 import java.util.Calendar;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
@@ -32,14 +39,18 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
 import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrint;
 import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrintNode;
 import org.opendaylight.controller.md.sal.dom.xsql.XSQLColumn;
 import org.opendaylight.controller.md.sal.dom.xsql.XSQLCriteria;
 import org.opendaylight.controller.md.sal.dom.xsql.XSQLODLUtils;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerNode;
 
-public class JDBCResultSet implements Serializable, ResultSet,
-        ResultSetMetaData {
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
+public class JDBCResultSet implements Serializable, ResultSet, ResultSetMetaData {
     private static final long serialVersionUID = -7450200738431047057L;
     private static final ClassLoader CLASS_LOADER = JDBCResultSet.class.getClassLoader();
     private static final Class<?>[] PROXY_INTERFACES = new Class[] { ResultSet.class };
@@ -57,27 +68,28 @@ public class JDBCResultSet implements Serializable, ResultSet,
     private Map<String, Map<XSQLColumn, List<XSQLCriteria>>> criteria = new ConcurrentHashMap<String, Map<XSQLColumn, List<XSQLCriteria>>>();
     private Exception err = null;
     private List<Record> EMPTY_RESULT = new LinkedList<Record>();
-    private transient Map<String,JDBCResultSet> subQueries = new HashMap<String,JDBCResultSet>();
+    private transient Map<String, JDBCResultSet> subQueries = new HashMap<String, JDBCResultSet>();
 
     public ResultSet getProxy() {
-         return (ResultSet) Proxy.newProxyInstance(CLASS_LOADER, PROXY_INTERFACES, new JDBCProxy(this));
+        return this;
+        //return (ResultSet) Proxy.newProxyInstance(CLASS_LOADER, PROXY_INTERFACES, new JDBCProxy(this));
     }
 
     public void setSQL(String _sql) {
         this.sql = _sql;
     }
 
-    public JDBCResultSet addSubQuery(String _sql,String logicalName) {
+    public JDBCResultSet addSubQuery(String _sql, String logicalName) {
         if (subQueries == null) {
-            subQueries = new HashMap<String,JDBCResultSet>();
+            subQueries = new HashMap<String, JDBCResultSet>();
         }
         JDBCResultSet rs = new JDBCResultSet(_sql);
-        this.subQueries.put(logicalName,rs);
+        this.subQueries.put(logicalName, rs);
         return rs;
     }
 
-    public Map<String,JDBCResultSet> getSubQueries() {
-        if (this.subQueries==null) {
+    public Map<String, JDBCResultSet> getSubQueries() {
+        if (this.subQueries == null) {
             this.subQueries = new HashMap<>();
         }
         return this.subQueries;
@@ -112,7 +124,8 @@ public class JDBCResultSet implements Serializable, ResultSet,
         }
     }
 
-    public int isObjectFitCriteria(Map<String, Object> objValues, String tableName) {
+    public int isObjectFitCriteria(Map<String, Object> objValues,
+            String tableName) {
         Map<XSQLColumn, List<XSQLCriteria>> tblCriteria = criteria
                 .get(tableName);
         if (tblCriteria == null) {
@@ -289,19 +302,41 @@ public class JDBCResultSet implements Serializable, ResultSet,
     }
 
     public static class Record {
+        // The map containing the attribute-to-value mapping
         public Map<String, Object> data = new HashMap<>();
+        // The element object (possibly some kind of NormalizedNode)
         public Object element = null;
+        // Does this record fit the criteria?
+        // In case of a list property, we first collect the whole list and
+        // only then decide which list items should be
+        // included or not.
+        public boolean fitCriteria = true;
 
         public Map<String, Object> getRecord() {
             return this.data;
         }
     }
 
-    private Map<String, Object> collectColumnValues(Object node, XSQLBluePrintNode bpn) {
-        Map<?, ?> subChildren = XSQLODLUtils.getChildren(node);
-        Map<String, Object> result = new HashMap<>();
-        for (Object stc : subChildren.values()) {
-            if (stc.getClass().getName().endsWith("ImmutableAugmentationNode")) {
+    public static class RecordsContainer {
+        public List<Record> records = new LinkedList<Record>();
+        public List<Record> fitRecords = new LinkedList<Record>();
+        public Object currentObject = null;
+    }
+
+    private void collectColumnValues(RecordsContainer rContainer,
+            XSQLBluePrintNode bpn) {
+        Collection<?> subChildren = XSQLODLUtils
+                .getChildrenCollection(rContainer.currentObject);
+        Record r = new Record();
+        r.element = rContainer.currentObject;
+        for (Object stc : subChildren) {
+            if (stc.getClass().getName()
+                    .endsWith("ImmutableUnkeyedListEntryNode")) {
+                r.fitCriteria = false;
+                rContainer.currentObject = stc;
+                collectColumnValues(rContainer, bpn);
+            } else if (stc.getClass().getName()
+                    .endsWith("ImmutableAugmentationNode")) {
                 Map<?, ?> values = XSQLODLUtils.getChildren(stc);
                 for (Object key : values.keySet()) {
                     Object val = values.get(key);
@@ -309,7 +344,7 @@ public class JDBCResultSet implements Serializable, ResultSet,
                         Object value = XSQLODLUtils.getValue(val);
                         String k = XSQLODLUtils.getNodeName(val);
                         if (value != null) {
-                            result.put(bpn.getBluePrintNodeName() + "." + k,
+                            r.data.put(bpn.getBluePrintNodeName() + "." + k,
                                     value.toString());
                         }
                     }
@@ -318,16 +353,17 @@ public class JDBCResultSet implements Serializable, ResultSet,
                 String k = XSQLODLUtils.getNodeName(stc);
                 Object value = XSQLODLUtils.getValue(stc);
                 if (value != null) {
-                    result.put(bpn.getBluePrintNodeName() + "." + k,
+                    r.data.put(bpn.getBluePrintNodeName() + "." + k,
                             value.toString());
                 }
             }
         }
-        return result;
+        if (r.fitCriteria) {
+            rContainer.records.add(r);
+        }
     }
 
-    private void addToData(Record rec, XSQLBluePrintNode bpn,
-            XSQLBluePrint bluePrint, Map<String, Object> fullRecord) {
+    private void addToData(Record rec, XSQLBluePrintNode bpn,XSQLBluePrint bluePrint, Map<String, Object> fullRecord) {
         XSQLBluePrintNode eNodes[] = bluePrint
                 .getBluePrintNodeByODLTableName(XSQLODLUtils
                         .getNodeIdentiofier(rec.element));
@@ -386,6 +422,11 @@ public class JDBCResultSet implements Serializable, ResultSet,
 
             String odlNodeName = XSQLODLUtils.getNodeIdentiofier(child);
             if (odlNodeName == null) {
+                if (child instanceof DataContainerNode) {
+                    List<Object> augChildren = getChildren(child, tableName,
+                            bluePrint);
+                    result.addAll(augChildren);
+                }
                 continue;
             }
 
@@ -407,7 +448,10 @@ public class JDBCResultSet implements Serializable, ResultSet,
                 continue;
             }
 
-            if (child.getClass().getName().endsWith("ImmutableContainerNode")) {
+            if (child.getClass().getName().endsWith("ImmutableUnkeyedListNode")) {
+                result.add(child);
+            } else if (child.getClass().getName()
+                    .endsWith("ImmutableContainerNode")) {
                 result.add(child);
             } else if (child.getClass().getName()
                     .endsWith("ImmutableAugmentationNode")) {
@@ -420,52 +464,76 @@ public class JDBCResultSet implements Serializable, ResultSet,
                 }
             } else if (child.getClass().getName().endsWith("ImmutableMapNode")) {
                 result.addAll(XSQLODLUtils.getMChildren(child));
+            } else {
+                XSQLAdapter.log("Missed Node Data OF Type="
+                        + child.getClass().getName());
             }
         }
         return result;
     }
 
-    public List<Record> addRecords(Object element, XSQLBluePrintNode node,boolean root, String tableName, XSQLBluePrint bluePrint) {
+    public List<Record> addRecords(Object element, XSQLBluePrintNode node,
+            boolean root, String tableName, XSQLBluePrint bluePrint) {
         List<Record> result = new LinkedList<Record>();
-        //In case this is a sibling to the requested table, the elenment type
-        //won't be in the path of the leaf node
-        if(node==null){
-            return result;
-        }
         String nodeID = XSQLODLUtils.getNodeIdentiofier(element);
         if (node.getODLTableName().equals(nodeID)) {
-            XSQLBluePrintNode bluePrintNode = bluePrint.getBluePrintNodeByODLTableName(nodeID)[0];
-            Record rec = new Record();
-            rec.element = element;
-            XSQLBluePrintNode bpn = this.tablesInQueryMap.get(bluePrintNode.getBluePrintNodeName());
-            if (this.criteria.containsKey(bluePrintNode.getBluePrintNodeName()) || bpn != null) {
-                Map<String, Object> allKeyValues = collectColumnValues(element, bpn);
-                if (!(isObjectFitCriteria(allKeyValues,
-                        bpn.getBluePrintNodeName()) == 1)) {
-                    return EMPTY_RESULT;
+            XSQLBluePrintNode bluePrintNode = bluePrint
+                    .getBluePrintNodeByODLTableName(nodeID)[0];
+            RecordsContainer rContainer = new RecordsContainer();
+            rContainer.currentObject = element;
+            XSQLBluePrintNode bpn = this.tablesInQueryMap.get(bluePrintNode
+                    .getBluePrintNodeName());
+            if (this.criteria.containsKey(bluePrintNode.getBluePrintNodeName())
+                    || bpn != null) {
+                collectColumnValues(rContainer, bpn);
+                for (Record r : rContainer.records) {
+                    if (!(isObjectFitCriteria(r.data,
+                            bpn.getBluePrintNodeName()) == 1)) {
+                        r.fitCriteria = false;
+                    }
+                    if (r.fitCriteria) {
+                        Record rec = new Record();
+                        rec.element = r.element;
+                        addToData(rec, bpn, bluePrint, r.data);
+                        rContainer.fitRecords.add(rec);
+                    }
                 }
-                addToData(rec, bpn, bluePrint, allKeyValues);
+                if (rContainer.fitRecords.isEmpty())
+                    return EMPTY_RESULT;
             }
-            if (root) {
-                addRecord(rec.data);
+            if (rContainer.records.isEmpty()) {
+                Record rec = new Record();
+                rec.element = rContainer.currentObject;
+                if (root) {
+                    addRecord(rec.data);
+                } else {
+                    result.add(rec);
+                }
             } else {
-                result.add(rec);
+                for (Record rec : rContainer.fitRecords) {
+                    if (root) {
+                        addRecord(rec.data);
+                    } else {
+                        result.add(rec);
+                    }
+                }
             }
             return result;
         }
 
         XSQLBluePrintNode parent = node.getParent();
-        List<Record> subRecords = addRecords(element, parent, false, tableName,bluePrint);
+        List<Record> subRecords = addRecords(element, parent, false, tableName,
+                bluePrint);
         for (Record subRec : subRecords) {
             List<Object> subO = getChildren(subRec.element, tableName,
                     bluePrint);
             if (subO != null) {
                 for (Object subData : subO) {
-                    Record rec = new Record();
-                    rec.element = subData;
-                    rec.data.putAll(subRec.data);
+                    RecordsContainer rContainer = new RecordsContainer();
+                    rContainer.currentObject = subData;
 
-                    String recID = XSQLODLUtils.getNodeIdentiofier(rec.element);
+                    String recID = XSQLODLUtils
+                            .getNodeIdentiofier(rContainer.currentObject);
                     XSQLBluePrintNode eNodes[] = bluePrint
                             .getBluePrintNodeByODLTableName(recID);
                     XSQLBluePrintNode bpn = null;
@@ -476,18 +544,24 @@ public class JDBCResultSet implements Serializable, ResultSet,
                             break;
                         }
                     }
-                    boolean isObjectInCriteria = true;
                     if (bpn != null) {
-                        Map<String, Object> allKeyValues = collectColumnValues(rec.element, bpn);
-                        if ((isObjectFitCriteria(allKeyValues,
-                                bpn.getBluePrintNodeName()) == 1)) {
-                            addToData(rec, bpn, bluePrint, allKeyValues);
-                        } else {
-                            isObjectInCriteria = false;
+                        collectColumnValues(rContainer, bpn);
+                        for (Record r : rContainer.records) {
+                            if ((isObjectFitCriteria(r.data,
+                                    bpn.getBluePrintNodeName()) == 1)) {
+                                Record rec = new Record();
+                                rec.data.putAll(subRec.data);
+                                rec.element = r.element;
+                                addToData(rec, bpn, bluePrint, r.data);
+                            } else {
+                                r.fitCriteria = false;
+                            }
                         }
                     }
-
-                    if (isObjectInCriteria) {
+                    if (rContainer.records.isEmpty()) {
+                        Record rec = new Record();
+                        rec.data.putAll(subRec.data);
+                        rec.element = rContainer.currentObject;
                         if (root) {
                             if (!rec.data.isEmpty()) {
                                 addRecord(rec.data);
@@ -495,11 +569,23 @@ public class JDBCResultSet implements Serializable, ResultSet,
                         } else {
                             result.add(rec);
                         }
+                    } else {
+                        for (Record r : rContainer.records) {
+                            r.data.putAll(subRec.data);
+                            if (r.fitCriteria) {
+                                if (root) {
+                                    if (!r.data.isEmpty()) {
+                                        addRecord(r.data);
+                                    }
+                                } else {
+                                    result.add(r);
+                                }
+                            }
+                        }
                     }
                 }
             }
         }
-
         return result;
     }
 
index 7b2733ccf784dbf0784cf316ac4d3343d3705f45..31941e496b4666108b2d03c84939044d288eef3b 100644 (file)
@@ -46,8 +46,7 @@ public class JDBCServer extends Thread {
         }
     }
 
-    public static void execute(JDBCResultSet rs, XSQLAdapter adapter)
-            throws SQLException {
+    public static void execute(JDBCResultSet rs, XSQLAdapter adapter)throws SQLException {
         if(rs.getSQL().toLowerCase().trim().equals("select 1")){
             rs.setFinished(true);
             return;
index cde01573f2c68d64649d008bb070faad556a5b45..29a1945a6e38a0e7e7ccde5995579dc1d4666536 100644 (file)
@@ -1,37 +1,47 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
 package org.opendaylight.xsql;
 
-import org.opendaylight.controller.sal.binding.api.data.DataModificationTransaction;
-import org.opendaylight.controller.sal.binding.api.data.DataProviderService;
+import org.opendaylight.controller.md.sal.binding.api.DataBroker;
+import org.opendaylight.controller.md.sal.binding.api.WriteTransaction;
+import org.opendaylight.controller.md.sal.common.api.data.LogicalDatastoreType;
+import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626.XSQL;
 import org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626.XSQLBuilder;
 import org.opendaylight.yangtools.yang.binding.InstanceIdentifier;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
- * Created by root on 6/26/14.
- */
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
 public class XSQLProvider implements AutoCloseable {
 
     public static final InstanceIdentifier<XSQL> ID = InstanceIdentifier.builder(XSQL.class).build();
-    private static final Logger LOG = LoggerFactory.getLogger(XSQLProvider.class);
+    //public static final InstanceIdentifier<SalTest> ID2 = InstanceIdentifier.builder(SalTest.class).build();
 
     public void close() {
     }
 
-    public XSQL buildXSQL(DataProviderService dps) {
+    public XSQL buildXSQL(DataBroker dps) {
+            XSQLAdapter.log("Building XSL...");
             XSQLBuilder builder = new XSQLBuilder();
             builder.setPort("34343");
             XSQL xsql = builder.build();
             try {
                 if (dps != null) {
-                    final DataModificationTransaction t = dps.beginTransaction();
-                    t.removeOperationalData(ID);
-                    t.putOperationalData(ID,xsql);
-                    t.commit().get();
+                    XSQLAdapter.log("Starting TRansaction...");
+                    WriteTransaction t = dps.newReadWriteTransaction();
+                    t.delete(LogicalDatastoreType.OPERATIONAL, ID);
+                    t.put(LogicalDatastoreType.OPERATIONAL,ID,xsql);
+                    XSQLAdapter.log("Submitting...");
+                    t.submit();
                 }
             } catch (Exception e) {
-                LOG.warn("Failed to update XSQL port status, ", e);
+                XSQLAdapter.log(e);
             }
         return xsql;
     }
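
buildXSQL() above is ported from the legacy DataModificationTransaction API to the MD-SAL DataBroker, but the future returned by submit() is dropped. A hedged sketch of the same write with the outcome observed; it reuses the binding types imported in the patched file plus Guava's Futures/FutureCallback, and the helper method name is made up:

    // Illustrative write helper: put the XSQL node and log the commit outcome.
    private void writeXsql(DataBroker dataBroker, XSQL xsql) {
        WriteTransaction tx = dataBroker.newWriteOnlyTransaction();
        tx.put(LogicalDatastoreType.OPERATIONAL, XSQLProvider.ID, xsql);
        Futures.addCallback(tx.submit(), new FutureCallback<Void>() {
            @Override
            public void onSuccess(final Void result) {
                XSQLAdapter.log("XSQL node written to the operational datastore");
            }

            @Override
            public void onFailure(final Throwable t) {
                XSQLAdapter.log("XSQL write failed: " + t);
            }
        });
    }
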
index c8a5a85ae6d22b7f4113af265b117d302ae4a9ee..a669345e140d5e201219749a5d229df43bb3c864 100644 (file)
@@ -1,9 +1,19 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
 package org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626;
 
 import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
 import org.opendaylight.xsql.XSQLProvider;
-
+/**
+ * @author Sharon Aicler(saichler@gmail.com)
+ **/
 public class XSQLModule extends org.opendaylight.yang.gen.v1.http.netconfcentral.org.ns.xsql.rev140626.AbstractXSQLModule {
+    private static final long SLEEP_TIME_BEFORE_CREATING_TRANSACTION = 10000;
     public XSQLModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
         super(identifier, dependencyResolver);
     }
@@ -22,9 +32,14 @@ public class XSQLModule extends org.opendaylight.yang.gen.v1.http.netconfcentral
         XSQLAdapter xsqlAdapter = XSQLAdapter.getInstance();
         getSchemaServiceDependency().registerSchemaContextListener(xsqlAdapter);
         xsqlAdapter.setDataBroker(getAsyncDataBrokerDependency());
-        XSQLProvider p = new XSQLProvider();
-        //p.buildXSQL(getDataBrokerDependency());
+        final XSQLProvider p = new XSQLProvider();
+        Runnable runthis = new Runnable() {
+            @Override
+            public void run() {
+                try{Thread.sleep(SLEEP_TIME_BEFORE_CREATING_TRANSACTION);}catch(Exception err){}
+                p.buildXSQL(getDataBrokerDependency());
+            }
+        };
         return p;
     }
-
 }
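
createInstance() above wraps the initial buildXSQL() call in a Runnable that sleeps for SLEEP_TIME_BEFORE_CREATING_TRANSACTION, but the hunk as shown never actually starts runthis. A hedged sketch of one way the delayed write could be scheduled explicitly, using Executors, ScheduledExecutorService and TimeUnit from java.util.concurrent; this is illustrative only and not what the patch does:

    // Illustrative scheduling of the deferred initial write (inside createInstance()).
    final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    scheduler.schedule(new Runnable() {
        @Override
        public void run() {
            p.buildXSQL(getDataBrokerDependency());
        }
    }, SLEEP_TIME_BEFORE_CREATING_TRANSACTION, TimeUnit.MILLISECONDS);
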
index d7d547d19e847303d88f94a7e75d80149c5a4b13..1b9a37df66ae351415a517064bb1b86a99b46e04 100644 (file)
@@ -8,8 +8,8 @@
                     </type>
                     <name>XSQL</name>
                     <data-broker>
-                       <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-data-broker</type>
-                       <name>binding-data-broker</name>
+                        <type xmlns:binding="urn:opendaylight:params:xml:ns:yang:controller:md:sal:binding">binding:binding-async-data-broker</type>
+                        <name>binding-data-broker</name>
                     </data-broker>
                     <async-data-broker>
                         <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
index 0437e10e34d688295d645ff0a3b47c3fcf1327a4..f0f52694a61b36e1511377868de9f24c494448c7 100644 (file)
@@ -37,14 +37,14 @@ module XSQL{
         case XSQL {
             when "/config:modules/config:module/config:type = 'XSQL'";
 
-                       container data-broker {
+            container data-broker {
                 uses config:service-ref {
                     refine type {
                         mandatory false;
-                        config:required-identity mdsal:binding-data-broker;
+                        config:required-identity mdsal:binding-async-data-broker;
                     }
                 }
-            }         
+            }
 
             container async-data-broker {
                 uses config:service-ref {
index 8a6b184f820b91b838d077f8ad640de7f271ad8d..e3f5fbb81018f9a915b37bca39d401ad96b0ca29 100644 (file)
@@ -2,18 +2,29 @@ package org.opendaylight.xsql.test;
 
 import java.io.InputStream;
 import java.sql.SQLException;
+import java.util.Collections;
+import java.util.Set;
 
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.opendaylight.controller.md.sal.dom.xsql.XSQLAdapter;
 import org.opendaylight.controller.md.sal.dom.xsql.XSQLBluePrint;
 import org.opendaylight.controller.md.sal.dom.xsql.jdbc.JDBCResultSet;
 import org.opendaylight.controller.md.sal.dom.xsql.jdbc.JDBCServer;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
 
 public class XSQLTest {
-
-    XSQLBluePrint bluePrint = null;
+    private static final String DATASTORE_TEST_YANG = "/sal-persisted-dom-test.yang";
+    private XSQLBluePrint bluePrint = null;
+    //private static SchemaContext schemaContext = null;
+    @BeforeClass
+    public static void loadSchemaContext(){
+        //schemaContext = createTestContext();
+    }
 
     @Before
     public void before() {
@@ -167,4 +178,18 @@ public class XSQLTest {
         System.out.print("*** XSQL Tests -");
         System.out.println(str);
     }
+
+    public static final InputStream getDatastoreTestInputStream() {
+        return getInputStream(DATASTORE_TEST_YANG);
+    }
+
+    private static InputStream getInputStream(final String resourceName) {
+        return XSQLTest.class.getResourceAsStream(resourceName);
+    }
+
+    public static SchemaContext createTestContext() {
+        YangParserImpl parser = new YangParserImpl();
+        Set<Module> modules = parser.parseYangModelsFromStreams(Collections.singletonList(getDatastoreTestInputStream()));
+        return parser.resolveSchemaContext(modules);
+    }
 }
index b6b34acfcd2b9e983e71320e0dd6f11a03c5d8c5..152f7878df6ab928a53c5a5e0a7c1f638b208faa 100644 (file)
Binary files a/opendaylight/md-sal/sal-dom-xsql/src/test/resources/BluePrintCache.dat and b/opendaylight/md-sal/sal-dom-xsql/src/test/resources/BluePrintCache.dat differ
index 5b0f73942864c2c4c4498c56abb3b365b63189e9..05e3d5cb26e5944dc46de6a6d3e7b7fc87ad56b3 100644 (file)
@@ -12,13 +12,14 @@ import com.google.common.util.concurrent.FutureCallback;
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
+import org.opendaylight.controller.sal.core.spi.data.ForwardingDOMStoreThreePhaseCommitCohort;
 
-final class ChainedTransactionCommitImpl implements DOMStoreThreePhaseCommitCohort {
+final class ChainedTransactionCommitImpl extends ForwardingDOMStoreThreePhaseCommitCohort {
     private final SnapshotBackedWriteTransaction transaction;
     private final DOMStoreThreePhaseCommitCohort delegate;
     private final DOMStoreTransactionChainImpl txChain;
 
-    protected ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction,
+    ChainedTransactionCommitImpl(final SnapshotBackedWriteTransaction transaction,
             final DOMStoreThreePhaseCommitCohort delegate, final DOMStoreTransactionChainImpl txChain) {
         this.transaction = Preconditions.checkNotNull(transaction);
         this.delegate = Preconditions.checkNotNull(delegate);
@@ -26,23 +27,13 @@ final class ChainedTransactionCommitImpl implements DOMStoreThreePhaseCommitCoho
     }
 
     @Override
-    public ListenableFuture<Boolean> canCommit() {
-        return delegate.canCommit();
-    }
-
-    @Override
-    public ListenableFuture<Void> preCommit() {
-        return delegate.preCommit();
-    }
-
-    @Override
-    public ListenableFuture<Void> abort() {
-        return delegate.abort();
+    protected DOMStoreThreePhaseCommitCohort delegate() {
+        return delegate;
     }
 
     @Override
     public ListenableFuture<Void> commit() {
-        ListenableFuture<Void> commitFuture = delegate.commit();
+        ListenableFuture<Void> commitFuture = super.commit();
         Futures.addCallback(commitFuture, new FutureCallback<Void>() {
             @Override
             public void onFailure(final Throwable t) {
@@ -56,4 +47,5 @@ final class ChainedTransactionCommitImpl implements DOMStoreThreePhaseCommitCoho
         });
         return commitFuture;
     }
+
 }
\ No newline at end of file
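
ChainedTransactionCommitImpl above drops its hand-written canCommit()/preCommit()/abort() delegation by extending ForwardingDOMStoreThreePhaseCommitCohort and supplying delegate(); only commit() remains overridden so the chain can hook completion. A generic sketch of that forwarding idiom; the base-class name below is invented, only the cohort interface comes from the patched imports:

    import com.google.common.util.concurrent.ListenableFuture;
    import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;

    // Illustrative forwarding base class: every call goes to delegate() unless overridden.
    abstract class ForwardingCohort implements DOMStoreThreePhaseCommitCohort {
        protected abstract DOMStoreThreePhaseCommitCohort delegate();

        @Override
        public ListenableFuture<Boolean> canCommit() { return delegate().canCommit(); }

        @Override
        public ListenableFuture<Void> preCommit() { return delegate().preCommit(); }

        @Override
        public ListenableFuture<Void> abort() { return delegate().abort(); }

        @Override
        public ListenableFuture<Void> commit() { return delegate().commit(); }
    }
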
index 1f85b473feb9478f849e33fc278c7907f5530e9e..b617a8087fc771d6f413b1ec6c3dc4e5bf597aa3 100644 (file)
@@ -22,6 +22,7 @@ import org.opendaylight.controller.md.sal.common.api.data.TransactionCommitFaile
 import org.opendaylight.controller.md.sal.dom.api.DOMDataTreeChangeListener;
 import org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype;
 import org.opendaylight.controller.md.sal.dom.store.impl.tree.ListenerTree;
+import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStore;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadWriteTransaction;
@@ -223,6 +224,13 @@ public class InMemoryDOMDataStore extends TransactionReadyPrototype implements D
         return name + "-" + txCounter.getAndIncrement();
     }
 
+    private static void warnDebugContext(AbstractDOMStoreTransaction<?> transaction) {
+        final Throwable ctx = transaction.getDebugContext();
+        if (ctx != null) {
+            LOG.warn("Transaction {} has been allocated in the following context", transaction.getIdentifier(), ctx);
+        }
+    }
+
     private final class ThreePhaseCommitImpl implements DOMStoreThreePhaseCommitCohort {
         private final SnapshotBackedWriteTransaction transaction;
         private final DataTreeModification modification;
@@ -244,12 +252,12 @@ public class InMemoryDOMDataStore extends TransactionReadyPrototype implements D
             } catch (ConflictingModificationAppliedException e) {
                 LOG.warn("Store Tx: {} Conflicting modification for {}.", transaction.getIdentifier(),
                         e.getPath());
-                transaction.warnDebugContext(LOG);
+                warnDebugContext(transaction);
                 return Futures.immediateFailedFuture(new OptimisticLockFailedException("Optimistic lock failed.", e));
             } catch (DataValidationFailedException e) {
                 LOG.warn("Store Tx: {} Data Precondition failed for {}.", transaction.getIdentifier(),
                         e.getPath(), e);
-                transaction.warnDebugContext(LOG);
+                warnDebugContext(transaction);
 
                 // For debugging purposes, allow dumping of the modification. Coupled with the above
                 // precondition log, it should allow us to understand what went on.
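
The new warnDebugContext() helper logs the Throwable that AbstractDOMStoreTransaction captured when the transaction was allocated, so validation failures can be traced back to the allocating call site. A minimal sketch of that capture-at-allocation idea; the class below is illustrative, not the controller's API:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Illustrative allocation-context capture: store a Throwable at construction,
    // log it only when something later goes wrong.
    final class DebugAllocated {
        private static final Logger LOG = LoggerFactory.getLogger(DebugAllocated.class);

        private final Object identifier;
        private final Throwable debugContext;

        DebugAllocated(final Object identifier, final boolean debugEnabled) {
            this.identifier = identifier;
            this.debugContext = debugEnabled ? new Throwable("allocation context") : null;
        }

        void warnDebugContext() {
            if (debugContext != null) {
                LOG.warn("Transaction {} has been allocated in the following context", identifier, debugContext);
            }
        }
    }
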
index 999fb91c659e20bed7a481ce3b12013efa230586..feb1b66dd3738478af4e663e32123d8b41b8142e 100644 (file)
@@ -19,6 +19,7 @@ import org.opendaylight.yangtools.util.concurrent.QueuedNotificationManager.Invo
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
 import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
+import org.opendaylight.yangtools.yang.data.api.schema.tree.spi.DefaultDataTreeCandidate;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -43,7 +44,7 @@ final class InMemoryDOMStoreTreeChangePublisher extends AbstractDOMStoreTreeChan
 
     @Override
     protected void notifyListeners(final Collection<AbstractDOMDataTreeChangeListenerRegistration<?>> registrations, final YangInstanceIdentifier path, final DataTreeCandidateNode node) {
-        final DataTreeCandidate candidate = new SimpleDataTreeCandidate(path, node);
+        final DataTreeCandidate candidate = new DefaultDataTreeCandidate(path, node);
 
         for (AbstractDOMDataTreeChangeListenerRegistration<?> reg : registrations) {
             LOG.debug("Enqueueing candidate {} to registration {}", candidate, registrations);
diff --git a/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SimpleDataTreeCandidate.java b/opendaylight/md-sal/sal-inmemory-datastore/src/main/java/org/opendaylight/controller/md/sal/dom/store/impl/SimpleDataTreeCandidate.java
deleted file mode 100644 (file)
index 701841c..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
- *
- * This program and the accompanying materials are made available under the
- * terms of the Eclipse Public License v1.0 which accompanies this distribution,
- * and is available at http://www.eclipse.org/legal/epl-v10.html
- */
-package org.opendaylight.controller.md.sal.dom.store.impl;
-
-import com.google.common.base.MoreObjects;
-import com.google.common.base.Preconditions;
-import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidate;
-import org.opendaylight.yangtools.yang.data.api.schema.tree.DataTreeCandidateNode;
-
-final class SimpleDataTreeCandidate implements DataTreeCandidate {
-    private final YangInstanceIdentifier rootPath;
-    private final DataTreeCandidateNode rootNode;
-
-    SimpleDataTreeCandidate(final YangInstanceIdentifier rootPath, final DataTreeCandidateNode rootNode) {
-        this.rootPath = Preconditions.checkNotNull(rootPath);
-        this.rootNode = Preconditions.checkNotNull(rootNode);
-    }
-
-    @Override
-    public DataTreeCandidateNode getRootNode() {
-        return rootNode;
-    }
-
-    @Override
-    public YangInstanceIdentifier getRootPath() {
-        return rootPath;
-    }
-
-    @Override
-    public String toString() {
-        return MoreObjects.toStringHelper(this).add("rootPath", rootPath).add("rootNode", rootNode).toString();
-    }
-}
\ No newline at end of file
index ed95796499b3e9a6887d5708a76d636934aa9027..8b18be432a4b42aa422526ca432596ecec68911c 100644 (file)
@@ -8,13 +8,12 @@
 package org.opendaylight.controller.md.sal.dom.store.impl;
 
 import static com.google.common.base.Preconditions.checkNotNull;
-
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.CheckedFuture;
 import com.google.common.util.concurrent.Futures;
-
 import org.opendaylight.controller.md.sal.common.api.data.ReadFailedException;
+import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreReadTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
@@ -30,7 +29,7 @@ import org.slf4j.LoggerFactory;
  * which delegates most of its calls to similar methods provided by underlying snapshot.
  *
  */
-final class SnapshotBackedReadTransaction extends AbstractDOMStoreTransaction
+final class SnapshotBackedReadTransaction extends AbstractDOMStoreTransaction<Object>
                                           implements DOMStoreReadTransaction {
 
     private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedReadTransaction.class);
index faddbae850ce50753dc7ca1e7298bdc6e77c0f21..10e3a7df10de5f41ee888b392e753479f5f186a9 100644 (file)
@@ -13,6 +13,7 @@ import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import org.opendaylight.controller.sal.core.spi.data.AbstractDOMStoreTransaction;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreThreePhaseCommitCohort;
 import org.opendaylight.controller.sal.core.spi.data.DOMStoreWriteTransaction;
 import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
@@ -28,7 +29,7 @@ import org.slf4j.LoggerFactory;
  * {@link org.opendaylight.controller.md.sal.dom.store.impl.SnapshotBackedWriteTransaction.TransactionReadyPrototype}.
  *
  */
-class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction implements DOMStoreWriteTransaction {
+class SnapshotBackedWriteTransaction extends AbstractDOMStoreTransaction<Object> implements DOMStoreWriteTransaction {
     private static final Logger LOG = LoggerFactory.getLogger(SnapshotBackedWriteTransaction.class);
     private static final AtomicReferenceFieldUpdater<SnapshotBackedWriteTransaction, TransactionReadyPrototype> READY_UPDATER =
             AtomicReferenceFieldUpdater.newUpdater(SnapshotBackedWriteTransaction.class, TransactionReadyPrototype.class, "readyImpl");
index cdce946aba6833006dce994a6c7ee9dc87624311..db9b702fed43e86b7a8446f6e705b5463326e890 100644 (file)
@@ -21,7 +21,6 @@ import com.google.common.util.concurrent.ListenableFuture;
 import com.google.common.util.concurrent.ListeningExecutorService;
 import com.google.common.util.concurrent.MoreExecutors;
 import java.util.Collection;
-import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
@@ -325,31 +324,24 @@ public final class NetconfDevice implements RemoteDevice<NetconfSessionPreferenc
 
         @Override
         public DeviceSources call() throws Exception {
-
-            final Set<SourceIdentifier> requiredSources = Sets.newHashSet(Collections2.transform(
-                    remoteSessionCapabilities.getModuleBasedCaps(), QNAME_TO_SOURCE_ID_FUNCTION));
-
-            // If monitoring is not supported, we will still attempt to create schema, sources might be already provided
             final NetconfStateSchemas availableSchemas = stateSchemasResolver.resolve(deviceRpc, remoteSessionCapabilities, id);
             logger.debug("{}: Schemas exposed by ietf-netconf-monitoring: {}", id, availableSchemas.getAvailableYangSchemasQNames());
 
-            final Set<SourceIdentifier> providedSources = Sets.newHashSet(Collections2.transform(
-                    availableSchemas.getAvailableYangSchemasQNames(), QNAME_TO_SOURCE_ID_FUNCTION));
-
-            final Set<SourceIdentifier> requiredSourcesNotProvided = Sets.difference(requiredSources, providedSources);
+            final Set<QName> requiredSources = Sets.newHashSet(remoteSessionCapabilities.getModuleBasedCaps());
+            final Set<QName> providedSources = availableSchemas.getAvailableYangSchemasQNames();
 
+            final Set<QName> requiredSourcesNotProvided = Sets.difference(requiredSources, providedSources);
             if (!requiredSourcesNotProvided.isEmpty()) {
                 logger.warn("{}: Netconf device does not provide all yang models reported in hello message capabilities, required but not provided: {}",
                         id, requiredSourcesNotProvided);
                 logger.warn("{}: Attempting to build schema context from required sources", id);
             }
 
-
             // Here all the sources reported in netconf monitoring are merged with those reported in hello.
             // It is necessary to perform this since submodules are not mentioned in hello but still required.
             // This clashes with the option of a user to specify supported yang models manually in configuration for netconf-connector
             // and as a result one is not able to fully override yang models of a device. It is only possible to add additional models.
-            final Set<SourceIdentifier> providedSourcesNotRequired = Sets.difference(providedSources, requiredSources);
+            final Set<QName> providedSourcesNotRequired = Sets.difference(providedSources, requiredSources);
             if (!providedSourcesNotRequired.isEmpty()) {
                 logger.warn("{}: Netconf device provides additional yang models not reported in hello message capabilities: {}",
                         id, providedSourcesNotRequired);
@@ -366,22 +358,30 @@ public final class NetconfDevice implements RemoteDevice<NetconfSessionPreferenc
      * Contains RequiredSources - sources from capabilities.
      */
     private static final class DeviceSources {
-        private final Collection<SourceIdentifier> requiredSources;
-        private final Collection<SourceIdentifier> providedSources;
+        private final Set<QName> requiredSources;
+        private final Set<QName> providedSources;
 
-        public DeviceSources(final Collection<SourceIdentifier> requiredSources, final Collection<SourceIdentifier> providedSources) {
+        public DeviceSources(final Set<QName> requiredSources, final Set<QName> providedSources) {
             this.requiredSources = requiredSources;
             this.providedSources = providedSources;
         }
 
-        public Collection<SourceIdentifier> getRequiredSources() {
+        public Set<QName> getRequiredSourcesQName() {
             return requiredSources;
         }
 
-        public Collection<SourceIdentifier> getProvidedSources() {
+        public Set<QName> getProvidedSourcesQName() {
             return providedSources;
         }
 
+        public Collection<SourceIdentifier> getRequiredSources() {
+            return Collections2.transform(requiredSources, QNAME_TO_SOURCE_ID_FUNCTION);
+        }
+
+        public Collection<SourceIdentifier> getProvidedSources() {
+            return Collections2.transform(providedSources, QNAME_TO_SOURCE_ID_FUNCTION);
+        }
+
     }
 
     /**
@@ -414,7 +414,9 @@ public final class NetconfDevice implements RemoteDevice<NetconfSessionPreferenc
 
             // If no more sources, fail
             if(requiredSources.isEmpty()) {
-                handleSalInitializationFailure(new IllegalStateException(id + ": No more sources for schema context"), listener);
+                final IllegalStateException cause = new IllegalStateException(id + ": No more sources for schema context");
+                handleSalInitializationFailure(cause, listener);
+                salFacade.onDeviceFailed(cause);
                 return;
             }
 
@@ -425,7 +427,7 @@ public final class NetconfDevice implements RemoteDevice<NetconfSessionPreferenc
                 @Override
                 public void onSuccess(final SchemaContext result) {
                     logger.debug("{}: Schema context built successfully from {}", id, requiredSources);
-                    final Collection<QName> filteredQNames = Sets.difference(remoteSessionCapabilities.getModuleBasedCaps(), capabilities.getUnresolvedCapabilites().keySet());
+                    final Collection<QName> filteredQNames = Sets.difference(deviceSources.getProvidedSourcesQName(), capabilities.getUnresolvedCapabilites().keySet());
                     capabilities.addCapabilities(filteredQNames);
                     capabilities.addNonModuleBasedCapabilities(remoteSessionCapabilities.getNonModuleCaps());
                     handleSalInitializationSuccess(result, remoteSessionCapabilities, getDeviceSpecificRpc(result));
@@ -470,27 +472,36 @@ public final class NetconfDevice implements RemoteDevice<NetconfSessionPreferenc
         }
 
         private Collection<QName> getQNameFromSourceIdentifiers(final Collection<SourceIdentifier> identifiers) {
-            final Collection<QName> qNames = new HashSet<>();
-            for (final SourceIdentifier source : identifiers) {
-                final Optional<QName> qname = getQNameFromSourceIdentifier(source);
-                if (qname.isPresent()) {
-                    qNames.add(qname.get());
+            final Collection<QName> qNames = Collections2.transform(identifiers, new Function<SourceIdentifier, QName>() {
+                @Override
+                public QName apply(final SourceIdentifier sourceIdentifier) {
+                    return getQNameFromSourceIdentifier(sourceIdentifier);
                 }
-            }
+            });
+
             if (qNames.isEmpty()) {
                 logger.debug("Unable to map any source identfiers to a capability reported by device : " + identifiers);
             }
             return qNames;
         }
 
-        private Optional<QName> getQNameFromSourceIdentifier(final SourceIdentifier identifier) {
-            for (final QName qname : remoteSessionCapabilities.getModuleBasedCaps()) {
-                if (qname.getLocalName().equals(identifier.getName())
-                        && qname.getFormattedRevision().equals(identifier.getRevision())) {
-                    return Optional.of(qname);
+        private QName getQNameFromSourceIdentifier(final SourceIdentifier identifier) {
+            // Required sources are all required and provided merged in DeviceSourcesResolver
+            for (final QName qname : deviceSources.getRequiredSourcesQName()) {
+                if(qname.getLocalName().equals(identifier.getName()) == false) {
+                    continue;
+                }
+
+                if(identifier.getRevision().equals(SourceIdentifier.NOT_PRESENT_FORMATTED_REVISION) &&
+                        qname.getRevision() == null) {
+                    return qname;
+                }
+
+                if (qname.getFormattedRevision().equals(identifier.getRevision())) {
+                    return qname;
                 }
             }
-            throw new IllegalArgumentException("Unable to map identifier to a devices reported capability: " + identifier);
+            throw new IllegalArgumentException("Unable to map identifier to a devices reported capability: " + identifier + " Available: " + deviceSources.getRequiredSourcesQName());
         }
     }
 }
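
getQNameFromSourceIdentifier() above now matches against the merged required sources and accepts a source whose revision is the NOT_PRESENT marker when the corresponding QName carries no revision at all. A standalone sketch of those matching rules over plain strings; the marker value below is an assumption, the real constant is SourceIdentifier.NOT_PRESENT_FORMATTED_REVISION:

    // Illustrative revision matching for source-identifier-to-QName lookup.
    final class SourceMatcher {
        // Assumed placeholder value; the patch uses SourceIdentifier.NOT_PRESENT_FORMATTED_REVISION.
        static final String NOT_PRESENT_REVISION = "0000-00-00";

        static boolean matches(final String sourceName, final String sourceRevision,
                final String qnameLocalName, final String qnameFormattedRevision) {
            if (!qnameLocalName.equals(sourceName)) {
                return false;
            }
            if (NOT_PRESENT_REVISION.equals(sourceRevision)) {
                return qnameFormattedRevision == null;   // module advertised without a revision
            }
            return sourceRevision.equals(qnameFormattedRevision);
        }
    }
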
index 942e4bbaeb3339485f68798974552d7ffdff800a..645028b13f8e842c4317f7526d05f90f66eab81f 100644 (file)
@@ -174,7 +174,7 @@ public final class NetconfStateSchemas {
     public final static class RemoteYangSchema {
         private final QName qname;
 
-        private RemoteYangSchema(final QName qname) {
+        RemoteYangSchema(final QName qname) {
             this.qname = qname;
         }
 
index 0f643789ecc64cb9bb6b662054878775b8a337af..93f4df83e44382b0f87b94a80c30b29ab98b4c4e 100644 (file)
@@ -19,7 +19,9 @@ import static org.mockito.Mockito.verify;
 
 import com.google.common.base.Optional;
 import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 import com.google.common.util.concurrent.Futures;
 import java.io.InputStream;
 import java.util.ArrayList;
@@ -46,6 +48,7 @@ import org.opendaylight.controller.sal.connect.netconf.listener.NetconfSessionPr
 import org.opendaylight.controller.sal.connect.netconf.sal.NetconfDeviceRpc;
 import org.opendaylight.controller.sal.connect.netconf.util.NetconfMessageTransformUtil;
 import org.opendaylight.controller.sal.connect.util.RemoteDeviceId;
+import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
 import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
 import org.opendaylight.yangtools.yang.model.api.Module;
@@ -132,6 +135,7 @@ public class NetconfDeviceTest {
     public void testNetconfDeviceMissingSource() throws Exception {
         final RemoteDeviceHandler<NetconfSessionPreferences> facade = getFacade();
         final NetconfDeviceCommunicator listener = getListener();
+        final SchemaContext schema = getSchema();
 
         final SchemaContextFactory schemaFactory = getSchemaFactory();
 
@@ -143,13 +147,23 @@ public class NetconfDeviceTest {
                 if(((Collection<?>) invocation.getArguments()[0]).size() == 2) {
                     return Futures.immediateFailedCheckedFuture(schemaResolutionException);
                 } else {
-                    return Futures.immediateCheckedFuture(getSchema());
+                    return Futures.immediateCheckedFuture(schema);
                 }
             }
         }).when(schemaFactory).createSchemaContext(anyCollectionOf(SourceIdentifier.class));
 
         final NetconfDevice.SchemaResourcesDTO schemaResourcesDTO
-                = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, stateSchemasResolver);
+                = new NetconfDevice.SchemaResourcesDTO(getSchemaRegistry(), schemaFactory, new NetconfStateSchemas.NetconfStateSchemasResolver() {
+            @Override
+            public NetconfStateSchemas resolve(final NetconfDeviceRpc deviceRpc, final NetconfSessionPreferences remoteSessionCapabilities, final RemoteDeviceId id) {
+                final Module first = Iterables.getFirst(schema.getModules(), null);
+                final QName qName = QName.create(first.getQNameModule(), first.getName());
+                final NetconfStateSchemas.RemoteYangSchema source1 = new NetconfStateSchemas.RemoteYangSchema(qName);
+                final NetconfStateSchemas.RemoteYangSchema source2 = new NetconfStateSchemas.RemoteYangSchema(QName.create(first.getQNameModule(), "test-module2"));
+                return new NetconfStateSchemas(Sets.newHashSet(source1, source2));
+            }
+        });
+
         final NetconfDevice device = new NetconfDevice(schemaResourcesDTO, getId(), facade, getExecutor(), true);
         // Monitoring supported
         final NetconfSessionPreferences sessionCaps = getSessionCaps(true, Lists.newArrayList(TEST_CAPABILITY, TEST_CAPABILITY2));
index d777942b53ede401e84f340bc2935628770b8550..0e660eaf6de50f7876e05e922956e24dfc6e3611 100644 (file)
             <param-name>javax.ws.rs.Application</param-name>
             <param-value>org.opendaylight.controller.sal.rest.doc.jaxrs.ApiDocApplication</param-value>
         </init-param>
+        <!-- AAA Auth Filter -->
+        <init-param>
+            <param-name>com.sun.jersey.spi.container.ContainerRequestFilters</param-name>
+            <param-value> org.opendaylight.aaa.sts.TokenAuthFilter</param-value>
+        </init-param>
         <load-on-startup>1</load-on-startup>
     </servlet>
 
 
     <security-constraint>
       <web-resource-collection>
-        <web-resource-name>free access</web-resource-name>
-        <url-pattern>/explorer/css/*</url-pattern>
-        <url-pattern>/explorer/images/*</url-pattern>
-        <url-pattern>/explorer/lib/*</url-pattern>
-        <url-pattern>/explorer/*</url-pattern>
+        <web-resource-name>API Doc</web-resource-name>
+        <url-pattern>/*</url-pattern>
       </web-resource-collection>
     </security-constraint>
 
index bc84734190937a8882583b165a72fc521dbb535f..50676c57c18faa8aa5320634ba3e638817439be9 100644 (file)
@@ -243,8 +243,6 @@ public class EditConfig extends AbstractConfigNetconfOperation {
             }
 
             Date revision = module.getRevision();
-            Preconditions.checkState(!revisionsByNamespace.containsKey(revision),
-                    "Duplicate revision %s for namespace %s", revision, namespace);
 
             IdentityMapping identityMapping = revisionsByNamespace.get(revision);
             if(identityMapping == null) {
index 0d3370548a5de0468ac138ee244bc9b2575d0a60..283ec424badac4a18703044cd95175f5b0aeef88 100644 (file)
@@ -8,11 +8,16 @@
 
 package org.opendaylight.controller.netconf.confignetconfconnector.osgi;
 
+import com.google.common.base.Optional;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
 import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.NoSuchElementException;
 import java.util.Set;
 import org.opendaylight.controller.config.yangjmxgenerator.ModuleMXBeanEntry;
 import org.opendaylight.controller.config.yangjmxgenerator.PackageTranslator;
@@ -23,6 +28,7 @@ import org.opendaylight.yangtools.yang.common.QName;
 import org.opendaylight.yangtools.yang.model.api.IdentitySchemaNode;
 import org.opendaylight.yangtools.yang.model.api.Module;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.parser.builder.impl.ModuleIdentifierImpl;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -99,12 +105,31 @@ final class YangStoreSnapshot implements YangStoreContext {
 
     @Override
     public Set<Module> getModules() {
-        return schemaContext.getModules();
+        final Set<Module> modules = Sets.newHashSet(schemaContext.getModules());
+        for (final Module module : schemaContext.getModules()) {
+            modules.addAll(module.getSubmodules());
+        }
+        return modules;
     }
 
     @Override
     public String getModuleSource(final org.opendaylight.yangtools.yang.model.api.ModuleIdentifier moduleIdentifier) {
-        return schemaContext.getModuleSource(moduleIdentifier).get();
+        final Optional<String> moduleSource = schemaContext.getModuleSource(moduleIdentifier);
+        if(moduleSource.isPresent()) {
+            return moduleSource.get();
+        } else {
+            try {
+                return Iterables.find(getModules(), new Predicate<Module>() {
+                    @Override
+                    public boolean apply(final Module input) {
+                        final ModuleIdentifierImpl id = new ModuleIdentifierImpl(input.getName(), Optional.fromNullable(input.getNamespace()), Optional.fromNullable(input.getRevision()));
+                        return id.equals(moduleIdentifier);
+                    }
+                }).getSource();
+            } catch (final NoSuchElementException e) {
+                throw new IllegalArgumentException("Source for yang module " + moduleIdentifier + " not found", e);
+            }
+        }
     }
 
     @Override
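
A minimal usage sketch of the two accessors above (illustrative only, not part of this patch): getModules() now also returns submodules, and getModuleSource() falls back to the module's own source when the SchemaContext has no registered entry. The helper name sourceFor and the pre-built yangStore/module arguments are assumptions made for the example.

    // Sketch: resolve YANG source text for a (sub)module through YangStoreContext.
    static String sourceFor(final YangStoreContext yangStore, final Module module) {
        final ModuleIdentifierImpl id = new ModuleIdentifierImpl(module.getName(),
                Optional.fromNullable(module.getNamespace()),
                Optional.fromNullable(module.getRevision()));
        // Uses the SchemaContext source when present, otherwise Module#getSource();
        // an IllegalArgumentException signals that neither is available.
        return yangStore.getModuleSource(id);
    }
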
index bf8bdb06f31dd37c3a27bb2e8236f041fad22f4f..bf41f190e6a689f143dd59ec31db0158a3692cb5 100644 (file)
@@ -10,7 +10,7 @@ package org.opendaylight.controller.config.yang.netconf.mdsal.mapper;
 
 import org.opendaylight.controller.netconf.mdsal.connector.MdsalNetconfOperationServiceFactory;
 
-public class NetconfMdsalMapperModule extends org.opendaylight.controller.config.yang.netconf.mdsal.mapper.AbstractNetconfMdsalMapperModule {
+public class NetconfMdsalMapperModule extends org.opendaylight.controller.config.yang.netconf.mdsal.mapper.AbstractNetconfMdsalMapperModule{
     public NetconfMdsalMapperModule(org.opendaylight.controller.config.api.ModuleIdentifier identifier, org.opendaylight.controller.config.api.DependencyResolver dependencyResolver) {
         super(identifier, dependencyResolver);
     }
@@ -26,13 +26,15 @@ public class NetconfMdsalMapperModule extends org.opendaylight.controller.config
 
     @Override
     public java.lang.AutoCloseable createInstance() {
-        final MdsalNetconfOperationServiceFactory mdsalNetconfOperationServiceFactory = new MdsalNetconfOperationServiceFactory(getRootSchemaServiceDependency(), getDomBrokerDependency()) {
-            @Override
-            public void close() throws Exception {
-                super.close();
-                getMapperAggregatorDependency().onRemoveNetconfOperationServiceFactory(this);
-            }
-        };
+        final MdsalNetconfOperationServiceFactory mdsalNetconfOperationServiceFactory =
+                new MdsalNetconfOperationServiceFactory(getRootSchemaServiceDependency()) {
+                    @Override
+                    public void close() throws Exception {
+                        super.close();
+                        getMapperAggregatorDependency().onRemoveNetconfOperationServiceFactory(this);
+                    }
+                };
+        getDomBrokerDependency().registerConsumer(mdsalNetconfOperationServiceFactory);
         getMapperAggregatorDependency().onAddNetconfOperationServiceFactory(mdsalNetconfOperationServiceFactory);
         return mdsalNetconfOperationServiceFactory;
     }
index cc22dd51aaa5c8bdbcaffda3ba09d2ac509a68e4..2f5bb098f5bef496ce02b0e37d91b217c6dfabd3 100644 (file)
@@ -10,6 +10,7 @@ package org.opendaylight.controller.netconf.mdsal.connector;
 
 import java.util.Set;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
 import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
 import org.opendaylight.controller.netconf.mapping.api.NetconfOperationService;
 
@@ -18,8 +19,8 @@ public class MdsalNetconfOperationService implements NetconfOperationService {
     private final OperationProvider operationProvider;
 
     public MdsalNetconfOperationService(final CurrentSchemaContext schemaContext, final String netconfSessionIdForReporting,
-                                        final DOMDataBroker dataBroker) {
-        this.operationProvider = new OperationProvider(netconfSessionIdForReporting, schemaContext, dataBroker);
+                                        final DOMDataBroker dataBroker, final DOMRpcService rpcService) {
+        this.operationProvider = new OperationProvider(netconfSessionIdForReporting, schemaContext, dataBroker, rpcService);
     }
 
     @Override
index 89ce149e12f75d2f2c63dbd70aa823e95ce6b324..96244fdc68a366c1e435705a11ac3ad2b60023ad 100644 (file)
@@ -8,36 +8,44 @@
 
 package org.opendaylight.controller.netconf.mdsal.connector;
 
+import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
 import org.opendaylight.controller.netconf.api.Capability;
 import org.opendaylight.controller.netconf.api.monitoring.CapabilityListener;
 import org.opendaylight.controller.netconf.mapping.api.NetconfOperationServiceFactory;
 import org.opendaylight.controller.netconf.util.capability.BasicCapability;
 import org.opendaylight.controller.netconf.util.capability.YangModuleCapability;
+import org.opendaylight.controller.sal.core.api.Broker.ConsumerSession;
+import org.opendaylight.controller.sal.core.api.Consumer;
 import org.opendaylight.controller.sal.core.api.model.SchemaService;
 import org.opendaylight.yangtools.yang.model.api.Module;
 import org.opendaylight.yangtools.yang.model.api.SchemaContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-public class MdsalNetconfOperationServiceFactory implements NetconfOperationServiceFactory, AutoCloseable {
+public class MdsalNetconfOperationServiceFactory implements NetconfOperationServiceFactory, Consumer, AutoCloseable {
 
     private static final Logger LOG = LoggerFactory.getLogger(MdsalNetconfOperationServiceFactory.class);
 
-    private final DOMDataBroker dataBroker;
+    private ConsumerSession session = null;
+    private DOMDataBroker dataBroker = null;
+    private DOMRpcService rpcService = null;
     private final CurrentSchemaContext currentSchemaContext;
 
-    public MdsalNetconfOperationServiceFactory(final SchemaService schemaService, final DOMDataBroker domDataBroker) {
+    public MdsalNetconfOperationServiceFactory(final SchemaService schemaService) {
         this.currentSchemaContext = new CurrentSchemaContext(Preconditions.checkNotNull(schemaService));
-        this.dataBroker = Preconditions.checkNotNull(domDataBroker);
     }
 
     @Override
     public MdsalNetconfOperationService createService(final String netconfSessionIdForReporting) {
-        return new MdsalNetconfOperationService(currentSchemaContext, netconfSessionIdForReporting, dataBroker);
+        Preconditions.checkState(dataBroker != null, "MD-SAL provider not yet initialized");
+        return new MdsalNetconfOperationService(currentSchemaContext, netconfSessionIdForReporting, dataBroker, rpcService);
     }
 
     @Override
@@ -50,28 +58,53 @@ public class MdsalNetconfOperationServiceFactory implements NetconfOperationServ
         return transformCapabilities(currentSchemaContext.getCurrentContext());
     }
 
-    static Set<Capability> transformCapabilities(final SchemaContext currentContext1) {
+    static Set<Capability> transformCapabilities(final SchemaContext currentContext) {
         final Set<Capability> capabilities = new HashSet<>();
         // [RFC6241] 8.3.  Candidate Configuration Capability
         capabilities.add(new BasicCapability("urn:ietf:params:netconf:capability:candidate:1.0"));
 
-        final SchemaContext currentContext = currentContext1;
         final Set<Module> modules = currentContext.getModules();
         for (final Module module : modules) {
-            if(currentContext.getModuleSource(module).isPresent()) {
-                capabilities.add(new YangModuleCapability(module, currentContext.getModuleSource(module).get()));
-            } else {
-                LOG.warn("Missing source for module {}. This module will not be available from netconf server",
-                        module);
+            Optional<YangModuleCapability> cap = moduleToCapability(module);
+            if(cap.isPresent()) {
+                capabilities.add(cap.get());
+            }
+            for (final Module submodule : module.getSubmodules()) {
+                cap = moduleToCapability(submodule);
+                if(cap.isPresent()) {
+                    capabilities.add(cap.get());
+                }
             }
         }
 
         return capabilities;
     }
 
+    private static Optional<YangModuleCapability> moduleToCapability(final Module module) {
+        final String source = module.getSource();
+        if(source != null) {
+            return Optional.of(new YangModuleCapability(module, source));
+        } else {
+            LOG.warn("Missing source for module {}. This module will not be available from netconf server",
+                    module);
+        }
+        return Optional.absent();
+    }
+
     @Override
     public AutoCloseable registerCapabilityListener(final CapabilityListener listener) {
         return currentSchemaContext.registerCapabilityListener(listener);
     }
 
+    @Override
+    public void onSessionInitiated(ConsumerSession session) {
+        this.session = Preconditions.checkNotNull(session);
+        this.dataBroker = this.session.getService(DOMDataBroker.class);
+        this.rpcService = this.session.getService(DOMRpcService.class);
+    }
+
+    @Override
+    public Collection<ConsumerFunctionality> getConsumerFunctionality() {
+        return Collections.emptySet();
+    }
 }
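
Taken together with the config-module change above, the factory now follows the Broker consumer lifecycle: registering it triggers onSessionInitiated(), which is where the DOMDataBroker and DOMRpcService are obtained. A condensed wiring sketch (illustrative; broker and schemaService are assumed to be supplied by the config subsystem, and the helper name wire is made up):

    // Sketch: Consumer-style wiring of the mdsal netconf connector.
    static MdsalNetconfOperationServiceFactory wire(final Broker broker, final SchemaService schemaService) {
        final MdsalNetconfOperationServiceFactory factory =
                new MdsalNetconfOperationServiceFactory(schemaService);
        // The broker calls back onSessionInitiated(session), which caches
        // session.getService(DOMDataBroker.class) and session.getService(DOMRpcService.class).
        broker.registerConsumer(factory);
        // createService() checks that the data broker has been set and fails fast otherwise.
        return factory;
    }
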
index c881ae2e4e929b3f51763fa73b49de065619a56a..8403dccc72f3947b1eb9a972c35a25d2d694949b 100644 (file)
@@ -11,11 +11,13 @@ package org.opendaylight.controller.netconf.mdsal.connector;
 import com.google.common.collect.Sets;
 import java.util.Set;
 import org.opendaylight.controller.md.sal.dom.api.DOMDataBroker;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
 import org.opendaylight.controller.netconf.mapping.api.NetconfOperation;
 import org.opendaylight.controller.netconf.mdsal.connector.ops.Commit;
 import org.opendaylight.controller.netconf.mdsal.connector.ops.DiscardChanges;
 import org.opendaylight.controller.netconf.mdsal.connector.ops.EditConfig;
 import org.opendaylight.controller.netconf.mdsal.connector.ops.Lock;
+import org.opendaylight.controller.netconf.mdsal.connector.ops.RuntimeRpc;
 import org.opendaylight.controller.netconf.mdsal.connector.ops.Unlock;
 import org.opendaylight.controller.netconf.mdsal.connector.ops.get.Get;
 import org.opendaylight.controller.netconf.mdsal.connector.ops.get.GetConfig;
@@ -25,14 +27,16 @@ final class OperationProvider {
     private final String netconfSessionIdForReporting;
     private final CurrentSchemaContext schemaContext;
     private final DOMDataBroker dataBroker;
+    private final DOMRpcService rpcService;
     private final TransactionProvider transactionProvider;
 
-    public OperationProvider(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext, final DOMDataBroker dataBroker) {
+    public OperationProvider(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext,
+                             final DOMDataBroker dataBroker, final DOMRpcService rpcService) {
         this.netconfSessionIdForReporting = netconfSessionIdForReporting;
         this.schemaContext = schemaContext;
         this.dataBroker = dataBroker;
-        this.transactionProvider = new TransactionProvider(dataBroker, netconfSessionIdForReporting);
-
+        this.rpcService = rpcService;
+        this.transactionProvider = new TransactionProvider(this.dataBroker, netconfSessionIdForReporting);
     }
 
     Set<NetconfOperation> getOperations() {
@@ -43,7 +47,8 @@ final class OperationProvider {
                 new Get(netconfSessionIdForReporting, schemaContext, transactionProvider),
                 new GetConfig(netconfSessionIdForReporting, schemaContext, transactionProvider),
                 new Lock(netconfSessionIdForReporting),
-                new Unlock(netconfSessionIdForReporting)
+                new Unlock(netconfSessionIdForReporting),
+                new RuntimeRpc(netconfSessionIdForReporting, schemaContext, rpcService)
         );
     }
 
index 5a980c44d3e80276979d5dcd9a08ca3e85c69e75..47e8f80585cdfc9411c15856722149906610d145 100644 (file)
@@ -12,7 +12,7 @@ import com.google.common.base.Optional;
 import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
 import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
 import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
 import org.opendaylight.controller.netconf.util.xml.XmlElement;
 import org.opendaylight.controller.netconf.util.xml.XmlUtil;
 import org.slf4j.Logger;
@@ -20,7 +20,7 @@ import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 
-public class Commit extends AbstractLastNetconfOperation{
+public class Commit extends AbstractSingletonNetconfOperation {
 
     private static final Logger LOG = LoggerFactory.getLogger(Commit.class);
 
index b47bb1843494aeb2051a5b551dec7f4cc7ace04c..ce4de18ee6b5ef5554232d566d8259cba251eca5 100644 (file)
@@ -17,7 +17,7 @@ import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorT
 import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
 import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
 import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
 import org.opendaylight.controller.netconf.util.xml.XmlElement;
 import org.opendaylight.controller.netconf.util.xml.XmlUtil;
 import org.slf4j.Logger;
@@ -25,7 +25,7 @@ import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 
-public class DiscardChanges extends AbstractLastNetconfOperation{
+public class DiscardChanges extends AbstractSingletonNetconfOperation {
 
     private static final Logger LOG = LoggerFactory.getLogger(DiscardChanges.class);
 
@@ -59,4 +59,5 @@ public class DiscardChanges extends AbstractLastNetconfOperation{
     protected String getOperationName() {
         return OPERATION_NAME;
     }
+
 }
index aebdfd9baf5d7dd944fbdba5e7253854556218c2..fbefb5c56d818ffd3e039cea449202530a113e42 100644 (file)
@@ -25,7 +25,7 @@ import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
 import org.opendaylight.controller.netconf.mdsal.connector.TransactionProvider;
 import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
 import org.opendaylight.controller.netconf.util.exception.UnexpectedNamespaceException;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
 import org.opendaylight.controller.netconf.util.xml.XmlElement;
 import org.opendaylight.controller.netconf.util.xml.XmlUtil;
 import org.opendaylight.yangtools.yang.data.api.ModifyAction;
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 
-public class EditConfig extends AbstractLastNetconfOperation {
+public class EditConfig extends AbstractSingletonNetconfOperation {
 
     private static final Logger LOG = LoggerFactory.getLogger(EditConfig.class);
 
@@ -229,4 +229,5 @@ public class EditConfig extends AbstractLastNetconfOperation {
     protected String getOperationName() {
         return OPERATION_NAME;
     }
+
 }
index db912c5fc0b2b34d438f1a661eaa070a09888a57..ef94f69f7a05099695cde5d4a6172ec3ca5f9b22 100644 (file)
@@ -13,7 +13,7 @@ import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
 import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
 import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
 import org.opendaylight.controller.netconf.util.exception.UnexpectedNamespaceException;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
 import org.opendaylight.controller.netconf.util.xml.XmlElement;
 import org.opendaylight.controller.netconf.util.xml.XmlUtil;
 import org.slf4j.Logger;
@@ -21,7 +21,7 @@ import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 
-public class Lock extends AbstractLastNetconfOperation{
+public class Lock extends AbstractSingletonNetconfOperation {
 
     private static final Logger LOG = LoggerFactory.getLogger(Lock.class);
 
@@ -61,4 +61,5 @@ public class Lock extends AbstractLastNetconfOperation{
     protected String getOperationName() {
         return OPERATION_NAME;
     }
+
 }
diff --git a/opendaylight/netconf/mdsal-netconf-connector/src/main/java/org/opendaylight/controller/netconf/mdsal/connector/ops/RuntimeRpc.java b/opendaylight/netconf/mdsal-netconf-connector/src/main/java/org/opendaylight/controller/netconf/mdsal/connector/ops/RuntimeRpc.java
new file mode 100644 (file)
index 0000000..ff7d30d
--- /dev/null
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops;
+
+import com.google.common.base.Optional;
+import com.google.common.base.Throwables;
+import com.google.common.util.concurrent.CheckedFuture;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Collections;
+import java.util.Map;
+import javax.annotation.Nullable;
+import javax.xml.stream.XMLOutputFactory;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.stream.XMLStreamWriter;
+import javax.xml.transform.dom.DOMResult;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
+import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier.PathArgument;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.api.schema.stream.NormalizedNodeWriter;
+import org.opendaylight.yangtools.yang.data.impl.codec.xml.XMLStreamNormalizedNodeStreamWriter;
+import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.DomUtils;
+import org.opendaylight.yangtools.yang.data.impl.schema.transform.dom.parser.DomToNormalizedNodeParserFactory;
+import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Attr;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Node;
+import org.w3c.dom.NodeList;
+
+public class RuntimeRpc extends AbstractSingletonNetconfOperation {
+
+    private static final Logger LOG = LoggerFactory.getLogger(RuntimeRpc.class);
+
+    private final CurrentSchemaContext schemaContext;
+    private static final XMLOutputFactory XML_OUTPUT_FACTORY;
+
+    static {
+        XML_OUTPUT_FACTORY = XMLOutputFactory.newFactory();
+        XML_OUTPUT_FACTORY.setProperty(XMLOutputFactory.IS_REPAIRING_NAMESPACES, true);
+    }
+
+    private final DOMRpcService rpcService;
+
+    public RuntimeRpc(final String netconfSessionIdForReporting, final CurrentSchemaContext schemaContext, final DOMRpcService rpcService) {
+        super(netconfSessionIdForReporting);
+        this.schemaContext = schemaContext;
+        this.rpcService = rpcService;
+    }
+
+    @Override
+    protected HandlingPriority canHandle(final String netconfOperationName, final String namespace) {
+        final URI namespaceURI = createNsUri(namespace);
+        final Optional<Module> module = getModule(namespaceURI);
+
+        if (!module.isPresent()) {
+            LOG.debug("Cannot handle rpc: {}, {}", netconfOperationName, namespace);
+            return HandlingPriority.CANNOT_HANDLE;
+        }
+
+        getRpcDefinitionFromModule(module.get(), namespaceURI, netconfOperationName);
+        return HandlingPriority.HANDLE_WITH_DEFAULT_PRIORITY;
+
+    }
+
+    @Override
+    protected String getOperationName() {
+        throw new UnsupportedOperationException("Runtime rpc does not have a stable name");
+    }
+
+    private URI createNsUri(final String namespace) {
+        final URI namespaceURI;
+        try {
+            namespaceURI = new URI(namespace);
+        } catch (URISyntaxException e) {
+            // Cannot occur, namespace in parsed XML cannot be invalid URI
+            throw new IllegalStateException("Unable to parse URI " + namespace, e);
+        }
+        return namespaceURI;
+    }
+
+    // Returns the module with the newest revision if more than one module with the same namespace is found
+    private Optional<Module> getModule(final URI namespaceURI) {
+        return Optional.of(schemaContext.getCurrentContext().findModuleByNamespaceAndRevision(namespaceURI, null));
+    }
+
+    private Optional<RpcDefinition> getRpcDefinitionFromModule(Module module, URI namespaceURI, String name) {
+        for (RpcDefinition rpcDef : module.getRpcs()) {
+            if (rpcDef.getQName().getNamespace().equals(namespaceURI)
+                    && rpcDef.getQName().getLocalName().equals(name)) {
+                return Optional.of(rpcDef);
+            }
+        }
+        return Optional.absent();
+    }
+
+    @Override
+    protected Element handleWithNoSubsequentOperations(final Document document, final XmlElement operationElement) throws NetconfDocumentedException {
+
+        final String netconfOperationName = operationElement.getName();
+        final String netconfOperationNamespace;
+        try {
+            netconfOperationNamespace = operationElement.getNamespace();
+        } catch (MissingNameSpaceException e) {
+            LOG.debug("Cannot retrieve netconf operation namespace from message due to ", e);
+            throw new NetconfDocumentedException("Cannot retrieve netconf operation namespace from message",
+                    ErrorType.protocol, ErrorTag.unknown_namespace, ErrorSeverity.error);
+        }
+
+        final URI namespaceURI = createNsUri(netconfOperationNamespace);
+        final Optional<Module> moduleOptional = getModule(namespaceURI);
+
+        if (!moduleOptional.isPresent()) {
+            throw new NetconfDocumentedException("Unable to find module in Schema Context with namespace and name : " +
+                    namespaceURI + " " + netconfOperationName + " in " + schemaContext.getCurrentContext(),
+                    ErrorType.application, ErrorTag.bad_element, ErrorSeverity.error);
+        }
+
+        final Optional<RpcDefinition> rpcDefinitionOptional = getRpcDefinitionFromModule(moduleOptional.get(), namespaceURI, netconfOperationName);
+
+        if (!rpcDefinitionOptional.isPresent()) {
+            throw new NetconfDocumentedException("Unable to find RpcDefinition with namespace and name : " + namespaceURI + " " + netconfOperationName,
+                    ErrorType.application, ErrorTag.bad_element, ErrorSeverity.error);
+        }
+
+        final RpcDefinition rpcDefinition = rpcDefinitionOptional.get();
+        final SchemaPath schemaPath = SchemaPath.create(Collections.singletonList(rpcDefinition.getQName()), true);
+        final NormalizedNode<?, ?> inputNode = rpcToNNode(operationElement, rpcDefinition.getInput());
+
+        final CheckedFuture<DOMRpcResult, DOMRpcException> rpcFuture = rpcService.invokeRpc(schemaPath, inputNode);
+        try {
+            final DOMRpcResult result = rpcFuture.checkedGet();
+            if (result.getResult() == null) {
+                return XmlUtil.createElement(document, XmlNetconfConstants.OK, Optional.of(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0));
+            }
+            return (Element) transformNormalizedNode(document, result.getResult(), rpcDefinition.getOutput().getPath());
+        } catch (DOMRpcException e) {
+            throw NetconfDocumentedException.wrap(e);
+        }
+    }
+
+    @Override
+    public Document handle(final Document requestMessage,
+                           final NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+
+        final XmlElement requestElement = getRequestElementWithCheck(requestMessage);
+
+        final Document document = XmlUtil.newDocument();
+
+        final XmlElement operationElement = requestElement.getOnlyChildElement();
+        final Map<String, Attr> attributes = requestElement.getAttributes();
+
+        final Element response = handle(document, operationElement, subsequentOperation);
+        final Element rpcReply = XmlUtil.createElement(document, XmlNetconfConstants.RPC_REPLY_KEY, Optional.of(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0));
+
+        if(XmlElement.fromDomElement(response).hasNamespace()) {
+            rpcReply.appendChild(response);
+        } else {
+            final NodeList list = response.getChildNodes();
+            if (list.getLength() == 0) {
+                rpcReply.appendChild(response);
+            } else {
+                while (list.getLength() != 0) {
+                    rpcReply.appendChild(list.item(0));
+                }
+            }
+        }
+
+        for (Attr attribute : attributes.values()) {
+            rpcReply.setAttributeNode((Attr) document.importNode(attribute, true));
+        }
+        document.appendChild(rpcReply);
+        return document;
+    }
+
+    // TODO move all occurrences of this method in mdsal netconf (and xml factories) to a utility class
+    private Node transformNormalizedNode(final Document document, final NormalizedNode<?, ?> data, final SchemaPath rpcOutputPath) {
+        final DOMResult result = new DOMResult(document.createElement(XmlNetconfConstants.RPC_REPLY_KEY));
+
+        final XMLStreamWriter xmlWriter = getXmlStreamWriter(result);
+
+        final NormalizedNodeStreamWriter nnStreamWriter = XMLStreamNormalizedNodeStreamWriter.create(xmlWriter,
+                schemaContext.getCurrentContext(), rpcOutputPath);
+
+        final NormalizedNodeWriter nnWriter = NormalizedNodeWriter.forStreamWriter(nnStreamWriter);
+
+        writeRootElement(xmlWriter, nnWriter, (ContainerNode) data);
+        try {
+            nnStreamWriter.close();
+            xmlWriter.close();
+        } catch (IOException | XMLStreamException e) {
+            LOG.warn("Error while closing streams", e);
+        }
+
+        return result.getNode();
+    }
+
+    private XMLStreamWriter getXmlStreamWriter(final DOMResult result) {
+        try {
+            return XML_OUTPUT_FACTORY.createXMLStreamWriter(result);
+        } catch (final XMLStreamException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private void writeRootElement(final XMLStreamWriter xmlWriter, final NormalizedNodeWriter nnWriter, final ContainerNode data) {
+        try {
+            for (final DataContainerChild<? extends PathArgument, ?> child : data.getValue()) {
+                nnWriter.write(child);
+            }
+            nnWriter.flush();
+            xmlWriter.flush();
+        } catch (XMLStreamException | IOException e) {
+            Throwables.propagate(e);
+        }
+    }
+
+    /**
+     * Parses the rpc input XML element into a normalized node.
+     *
+     * @param oElement rpc xml element
+     * @param input input container schema node, or null if the rpc does not take any input
+     * @return the parsed input as a normalized node, or null if the input schema is null
+     */
+    @Nullable
+    private NormalizedNode<?, ?> rpcToNNode(final XmlElement oElement, @Nullable final ContainerSchemaNode input) {
+        return input == null ? null : DomToNormalizedNodeParserFactory
+                .getInstance(DomUtils.defaultValueCodecProvider(), schemaContext.getCurrentContext())
+                .getContainerNodeParser()
+                .parse(Collections.singletonList(oElement.getDomElement()), input);
+    }
+
+}
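
The essential dispatch performed by handleWithNoSubsequentOperations() above, condensed into one helper (illustrative only; rpcDefinition, inputNode and rpcService stand in for the values computed in the operation, and the helper name invoke is made up):

    // Sketch: hand an rpc definition plus parsed input to the MD-SAL rpc service.
    static DOMRpcResult invoke(final DOMRpcService rpcService, final RpcDefinition rpcDefinition,
                               final NormalizedNode<?, ?> inputNode) throws DOMRpcException {
        // The rpc QName becomes an absolute SchemaPath; a null input is legal for input-less rpcs.
        final SchemaPath path = SchemaPath.create(Collections.singletonList(rpcDefinition.getQName()), true);
        final CheckedFuture<DOMRpcResult, DOMRpcException> future = rpcService.invokeRpc(path, inputNode);
        // A null result value is rendered as a plain <ok/> reply; otherwise the output container
        // is streamed back to XML (see transformNormalizedNode above).
        return future.checkedGet();
    }
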
index 2dd26633dd53b9137e77cc06d9ddf61dd93a35bb..08ffe8b2a0ca7f1fa2cda74adcd15d69cc9efa90 100644 (file)
@@ -11,7 +11,7 @@ package org.opendaylight.controller.netconf.mdsal.connector.ops;
 import com.google.common.base.Optional;
 import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
 import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
 import org.opendaylight.controller.netconf.util.xml.XmlElement;
 import org.opendaylight.controller.netconf.util.xml.XmlUtil;
 import org.slf4j.Logger;
@@ -19,7 +19,7 @@ import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 
-public class Unlock extends AbstractLastNetconfOperation{
+public class Unlock extends AbstractSingletonNetconfOperation {
 
     private static final Logger LOG = LoggerFactory.getLogger(Unlock.class);
 
index 711cb8145b7d0ba110e552ca5a106658aa835747..9a66ceb5bcd0d8dc66fc77cfa2f215a37e77d223 100644 (file)
@@ -29,7 +29,7 @@ import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorT
 import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
 import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
 import org.opendaylight.controller.netconf.mdsal.connector.ops.Datastore;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
 import org.opendaylight.controller.netconf.util.xml.XmlElement;
 import org.opendaylight.controller.sal.connect.netconf.util.InstanceIdToNodes;
 import org.opendaylight.yangtools.yang.common.QName;
@@ -56,7 +56,7 @@ import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.w3c.dom.Node;
 
-public abstract class AbstractGet extends AbstractLastNetconfOperation {
+public abstract class AbstractGet extends AbstractSingletonNetconfOperation {
 
     private static final Logger LOG = LoggerFactory.getLogger(AbstractGet.class);
 
index 9d9966e8f1d5e922c193c80140297069782741ee..c787287da5f81660edea616d4a0455b1d8e3b25a 100644 (file)
@@ -39,8 +39,8 @@ module netconf-mdsal-mapper {
             container dom-broker {
                 uses config:service-ref {
                     refine type {
-                        mandatory false;
-                        config:required-identity md-sal-dom:dom-async-data-broker;
+                        mandatory true;
+                        config:required-identity md-sal-dom:dom-broker-osgi-registry;
                     }
                 }
             }
diff --git a/opendaylight/netconf/mdsal-netconf-connector/src/test/java/org/opendaylight/controller/netconf/mdsal/connector/ops/RuntimeRpcTest.java b/opendaylight/netconf/mdsal-netconf-connector/src/test/java/org/opendaylight/controller/netconf/mdsal/connector/ops/RuntimeRpcTest.java
new file mode 100644 (file)
index 0000000..32eb08c
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+ * Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Eclipse Public License v1.0 which accompanies this distribution,
+ * and is available at http://www.eclipse.org/legal/epl-v10.html
+ */
+
+package org.opendaylight.controller.netconf.mdsal.connector.ops;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.MockitoAnnotations.initMocks;
+
+import com.google.common.base.Preconditions;
+import com.google.common.io.ByteSource;
+import com.google.common.util.concurrent.CheckedFuture;
+import com.google.common.util.concurrent.Futures;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import javax.annotation.Nonnull;
+import javax.annotation.Nullable;
+import org.custommonkey.xmlunit.DetailedDiff;
+import org.custommonkey.xmlunit.Diff;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.custommonkey.xmlunit.examples.RecursiveElementNameAndTextQualifier;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mock;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcAvailabilityListener;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcException;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcResult;
+import org.opendaylight.controller.md.sal.dom.api.DOMRpcService;
+import org.opendaylight.controller.md.sal.dom.spi.DefaultDOMRpcResult;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorSeverity;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorTag;
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException.ErrorType;
+import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.mdsal.connector.CurrentSchemaContext;
+import org.opendaylight.controller.netconf.util.test.XmlFileLoader;
+import org.opendaylight.controller.netconf.util.xml.XmlUtil;
+import org.opendaylight.controller.sal.core.api.model.SchemaService;
+import org.opendaylight.yangtools.concepts.ListenerRegistration;
+import org.opendaylight.yangtools.yang.common.RpcError;
+import org.opendaylight.yangtools.yang.data.api.YangInstanceIdentifier;
+import org.opendaylight.yangtools.yang.data.api.schema.ContainerNode;
+import org.opendaylight.yangtools.yang.data.api.schema.DataContainerChild;
+import org.opendaylight.yangtools.yang.data.api.schema.NormalizedNode;
+import org.opendaylight.yangtools.yang.data.impl.schema.builder.impl.ImmutableContainerNodeBuilder;
+import org.opendaylight.yangtools.yang.model.api.ContainerSchemaNode;
+import org.opendaylight.yangtools.yang.model.api.Module;
+import org.opendaylight.yangtools.yang.model.api.RpcDefinition;
+import org.opendaylight.yangtools.yang.model.api.SchemaContext;
+import org.opendaylight.yangtools.yang.model.api.SchemaContextListener;
+import org.opendaylight.yangtools.yang.model.api.SchemaPath;
+import org.opendaylight.yangtools.yang.model.parser.api.YangSyntaxErrorException;
+import org.opendaylight.yangtools.yang.parser.builder.impl.BuilderUtils;
+import org.opendaylight.yangtools.yang.parser.impl.YangParserImpl;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.w3c.dom.Document;
+
+public class RuntimeRpcTest {
+
+    private static final Logger LOG = LoggerFactory.getLogger(RuntimeRpcTest.class);
+
+    private String sessionIdForReporting = "netconf-test-session1";
+
+    private static Document RPC_REPLY_OK = null;
+
+    static {
+        try {
+            RPC_REPLY_OK = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/runtimerpc-ok-reply.xml");
+        } catch (Exception e) {
+            LOG.debug("unable to load rpc reply ok.", e);
+            RPC_REPLY_OK = XmlUtil.newDocument();
+        }
+    }
+
+    private DOMRpcService rpcServiceVoidInvoke = new DOMRpcService() {
+        @Nonnull
+        @Override
+        public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(@Nonnull SchemaPath type, @Nullable NormalizedNode<?, ?> input) {
+            return Futures.immediateCheckedFuture((DOMRpcResult) new DefaultDOMRpcResult(null, Collections.<RpcError>emptyList()));
+        }
+
+        @Nonnull
+        @Override
+        public <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(@Nonnull T listener) {
+            return null;
+        }
+    };
+
+    private DOMRpcService rpcServiceFailedInvocation = new DOMRpcService() {
+        @Nonnull
+        @Override
+        public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(@Nonnull SchemaPath type, @Nullable NormalizedNode<?, ?> input) {
+            return Futures.immediateFailedCheckedFuture((DOMRpcException) new DOMRpcException("rpc invocation not implemented yet") {
+            });
+        }
+
+        @Nonnull
+        @Override
+        public <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(@Nonnull T listener) {
+            return null;
+        }
+    };
+
+    private DOMRpcService rpcServiceSuccessfulInvocation = new DOMRpcService() {
+        @Nonnull
+        @Override
+        public CheckedFuture<DOMRpcResult, DOMRpcException> invokeRpc(@Nonnull SchemaPath type, @Nullable NormalizedNode<?, ?> input) {
+            Collection<DataContainerChild<? extends YangInstanceIdentifier.PathArgument, ?>> children = (Collection) input.getValue();
+            Module module = schemaContext.findModuleByNamespaceAndRevision(type.getLastComponent().getNamespace(), null);
+            RpcDefinition rpcDefinition = getRpcDefinitionFromModule(module, module.getNamespace(), type.getLastComponent().getLocalName());
+            ContainerSchemaNode outputSchemaNode = rpcDefinition.getOutput();
+            ContainerNode node = ImmutableContainerNodeBuilder.create()
+                    .withNodeIdentifier(new YangInstanceIdentifier.NodeIdentifier(outputSchemaNode.getQName()))
+                    .withValue(children).build();
+
+            return Futures.immediateCheckedFuture((DOMRpcResult) new DefaultDOMRpcResult(node));
+        }
+
+        @Nonnull
+        @Override
+        public <T extends DOMRpcAvailabilityListener> ListenerRegistration<T> registerRpcListener(@Nonnull T listener) {
+            return null;
+        }
+    };
+
+    private SchemaContext schemaContext = null;
+    private CurrentSchemaContext currentSchemaContext = null;
+    @Mock
+    private SchemaService schemaService;
+    @Mock
+    private SchemaContextListener listener;
+    @Mock
+    private ListenerRegistration registration;
+
+    @Before
+    public void setUp() throws Exception {
+
+        initMocks(this);
+        doNothing().when(registration).close();
+        doReturn(listener).when(registration).getInstance();
+        doNothing().when(schemaService).addModule(any(Module.class));
+        doNothing().when(schemaService).removeModule(any(Module.class));
+        doReturn(schemaContext).when(schemaService).getGlobalContext();
+        doReturn(schemaContext).when(schemaService).getSessionContext();
+        doAnswer(new Answer() {
+            @Override
+            public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
+                ((SchemaContextListener) invocationOnMock.getArguments()[0]).onGlobalContextUpdated(schemaContext);
+                return registration;
+            }
+        }).when(schemaService).registerSchemaContextListener(any(SchemaContextListener.class));
+
+        XMLUnit.setIgnoreWhitespace(true);
+        XMLUnit.setIgnoreAttributeOrder(true);
+
+        this.schemaContext = parseSchemas(getYangSchemas());
+        this.currentSchemaContext = new CurrentSchemaContext(schemaService);
+    }
+
+    @Test
+    public void testVoidOutputRpc() throws Exception {
+        RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceVoidInvoke);
+
+        Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-void-output.xml");
+        HandlingPriority priority = rpc.canHandle(rpcDocument);
+        Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+        Document response = rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+
+        verifyResponse(response, RPC_REPLY_OK);
+    }
+
+    @Test
+    public void testSuccessfulNonVoidInvocation() throws Exception {
+        RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceSuccessfulInvocation);
+
+        Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-nonvoid.xml");
+        HandlingPriority priority = rpc.canHandle(rpcDocument);
+        Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+        Document response = rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+        verifyResponse(response, XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-nonvoid-control.xml"));
+    }
+
+    @Test
+    public void testFailedInvocation() throws Exception {
+        RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceFailedInvocation);
+
+        Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-nonvoid.xml");
+        HandlingPriority priority = rpc.canHandle(rpcDocument);
+        Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+        try {
+            rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+            fail("should have failed with rpc invocation not implemented yet");
+        } catch (NetconfDocumentedException e) {
+            assertTrue(e.getErrorType() == ErrorType.application);
+            assertTrue(e.getErrorSeverity() == ErrorSeverity.error);
+            assertTrue(e.getErrorTag() == ErrorTag.operation_failed);
+        }
+    }
+
+    @Test
+    public void testVoidInputOutputRpc() throws Exception {
+        RuntimeRpc rpc = new RuntimeRpc(sessionIdForReporting, currentSchemaContext, rpcServiceVoidInvoke);
+
+        Document rpcDocument = XmlFileLoader.xmlFileToDocument("messages/mapping/rpcs/rpc-void-input-output.xml");
+        HandlingPriority priority = rpc.canHandle(rpcDocument);
+        Preconditions.checkState(priority != HandlingPriority.CANNOT_HANDLE);
+
+        Document response = rpc.handle(rpcDocument, NetconfOperationChainedExecution.EXECUTION_TERMINATION_POINT);
+
+        verifyResponse(response, RPC_REPLY_OK);
+    }
+
+    private void verifyResponse(Document response, Document template) {
+        DetailedDiff dd = new DetailedDiff(new Diff(response, template));
+        dd.overrideElementQualifier(new RecursiveElementNameAndTextQualifier());
+        assertTrue(dd.similar());
+    }
+
+    private RpcDefinition getRpcDefinitionFromModule(Module module, URI namespaceURI, String name) {
+        for (RpcDefinition rpcDef : module.getRpcs()) {
+            if (rpcDef.getQName().getNamespace().equals(namespaceURI)
+                    && rpcDef.getQName().getLocalName().equals(name)) {
+                return rpcDef;
+            }
+        }
+
+        return null;
+
+    }
+
+    private Collection<InputStream> getYangSchemas() {
+        final List<String> schemaPaths = Arrays.asList("/yang/mdsal-netconf-rpc-test.yang");
+        final List<InputStream> schemas = new ArrayList<>();
+
+        for (String schemaPath : schemaPaths) {
+            InputStream resourceAsStream = getClass().getResourceAsStream(schemaPath);
+            schemas.add(resourceAsStream);
+        }
+
+        return schemas;
+    }
+
+    private SchemaContext parseSchemas(Collection<InputStream> schemas) throws IOException, YangSyntaxErrorException {
+        final YangParserImpl parser = new YangParserImpl();
+        Collection<ByteSource> sources = BuilderUtils.streamsToByteSources(schemas);
+        return parser.parseSources(sources);
+    }
+}
\ No newline at end of file
diff --git a/opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/rpc-nonvoid-control.xml b/opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/rpc-nonvoid-control.xml
new file mode 100644 (file)
index 0000000..139885b
--- /dev/null
@@ -0,0 +1,17 @@
+<!--
+  ~ Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+  ~
+  ~ This program and the accompanying materials are made available under the
+  ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+  ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+  -->
+
+<rpc-reply message-id="2"
+     xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+    <test-string xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+        test rpc input string 1
+    </test-string>
+    <test-string2 xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+        test rpc input string 2
+    </test-string2>
+</rpc-reply>
\ No newline at end of file
diff --git a/opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/rpc-nonvoid.xml b/opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/rpc-nonvoid.xml
new file mode 100644 (file)
index 0000000..b5cc5ec
--- /dev/null
@@ -0,0 +1,19 @@
+<!--
+  ~ Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+  ~
+  ~ This program and the accompanying materials are made available under the
+  ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+  ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+  -->
+
+<rpc message-id="2"
+     xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+    <nonvoid-rpc xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+        <test-string>
+            test rpc input string 1
+        </test-string>
+        <test-string2>
+            test rpc input string 2
+        </test-string2>
+    </nonvoid-rpc>
+</rpc>
\ No newline at end of file
diff --git a/opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/rpc-void-input-output.xml b/opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/rpc-void-input-output.xml
new file mode 100644 (file)
index 0000000..c6b09f8
--- /dev/null
@@ -0,0 +1,12 @@
+<!--
+  ~ Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+  ~
+  ~ This program and the accompanying materials are made available under the
+  ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+  ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+  -->
+
+<rpc message-id="2"
+     xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+    <void-input-output-rpc xmlns="urn:opendaylight:mdsal:mapping:rpc:test"/>
+</rpc>
\ No newline at end of file
diff --git a/opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/rpc-void-output.xml b/opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/rpc-void-output.xml
new file mode 100644 (file)
index 0000000..a963865
--- /dev/null
@@ -0,0 +1,19 @@
+<!--
+  ~ Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+  ~
+  ~ This program and the accompanying materials are made available under the
+  ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+  ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+  -->
+
+<rpc message-id="2"
+     xmlns="urn:ietf:params:xml:ns:netconf:base:1.0">
+    <void-output-rpc xmlns="urn:opendaylight:mdsal:mapping:rpc:test">
+        <test-string>
+            test rpc input string 1
+        </test-string>
+        <test-string2>
+            test rpc input string 2
+        </test-string2>
+    </void-output-rpc>
+</rpc>
\ No newline at end of file
diff --git a/opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/runtimerpc-ok-reply.xml b/opendaylight/netconf/mdsal-netconf-connector/src/test/resources/messages/mapping/rpcs/runtimerpc-ok-reply.xml
new file mode 100644 (file)
index 0000000..e44046e
--- /dev/null
@@ -0,0 +1,11 @@
+<!--
+  ~ Copyright (c) 2015 Cisco Systems, Inc. and others.  All rights reserved.
+  ~
+  ~ This program and the accompanying materials are made available under the
+  ~ terms of the Eclipse Public License v1.0 which accompanies this distribution,
+  ~ and is available at http://www.eclipse.org/legal/epl-v10.html
+  -->
+
+<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" message-id="2">
+    <ok/>
+</rpc-reply>
\ No newline at end of file
diff --git a/opendaylight/netconf/mdsal-netconf-connector/src/test/resources/yang/mdsal-netconf-rpc-test.yang b/opendaylight/netconf/mdsal-netconf-connector/src/test/resources/yang/mdsal-netconf-rpc-test.yang
new file mode 100644 (file)
index 0000000..d493840
--- /dev/null
@@ -0,0 +1,44 @@
+module rpc-test {
+    yang-version 1;
+    namespace "urn:opendaylight:mdsal:mapping:rpc:test";
+    prefix "rpc";
+
+    rpc void-input-output-rpc {
+
+    }
+
+    rpc void-output-rpc {
+        input {
+            leaf test-string {
+                type string;
+            }
+
+            leaf test-string2 {
+                type string;
+            }
+        }
+    }
+
+    rpc nonvoid-rpc {
+        input {
+            leaf test-string {
+                type string;
+            }
+
+            leaf test-string2 {
+                type string;
+            }
+        }
+
+        output {
+            leaf test-string {
+                type string;
+            }
+
+            leaf test-string2 {
+                type string;
+            }
+        }
+    }
+}
+
index 06c695c25a1941d74877234a7c030a45b34ac7e1..f4017fbe5897521e5589f36e987abdc454cfb895 100644 (file)
@@ -8,7 +8,7 @@
 
 package org.opendaylight.controller.netconf.client;
 
-import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableList;
 import io.netty.channel.Channel;
 import io.netty.channel.ChannelFuture;
@@ -45,6 +45,9 @@ public class NetconfClientSessionNegotiator extends
     private static final XPathExpression sessionIdXPath = XMLNetconfUtil
             .compileXPath("/netconf:hello/netconf:session-id");
 
+    private static final XPathExpression sessionIdXPathNoNamespace = XMLNetconfUtil
+            .compileXPath("/hello/session-id");
+
     private static final String EXI_1_0_CAPABILITY_MARKER = "exi:1.0";
 
     protected NetconfClientSessionNegotiator(final NetconfClientSessionPreferences sessionPreferences,
@@ -113,16 +116,22 @@ public class NetconfClientSessionNegotiator extends
     }
 
     private long extractSessionId(final Document doc) {
-        final Node sessionIdNode = (Node) XmlUtil.evaluateXPath(sessionIdXPath, doc, XPathConstants.NODE);
-        Preconditions.checkState(sessionIdNode != null, "");
-        String textContent = sessionIdNode.getTextContent();
-        if (textContent == null || textContent.equals("")) {
-            throw new IllegalStateException("Session id not received from server");
+        String textContent = getSessionIdWithXPath(doc, sessionIdXPath);
+        if (Strings.isNullOrEmpty(textContent)) {
+            textContent = getSessionIdWithXPath(doc, sessionIdXPathNoNamespace);
+            if (Strings.isNullOrEmpty(textContent)) {
+                throw new IllegalStateException("Session id not received from server, hello message: " + XmlUtil.toString(doc));
+            }
         }
 
         return Long.valueOf(textContent);
     }
 
+    private String getSessionIdWithXPath(final Document doc, final XPathExpression xPath) {
+        final Node sessionIdNode = (Node) XmlUtil.evaluateXPath(xPath, doc, XPathConstants.NODE);
+        return sessionIdNode != null ? sessionIdNode.getTextContent() : null;
+    }
+
     @Override
     protected NetconfClientSession getSession(final NetconfClientSessionListener sessionListener, final Channel channel,
             final NetconfHelloMessage message) throws NetconfDocumentedException {
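
The second XPath exists because some servers send a hello element without the base-1.0 namespace (the relaxed isHelloMessage() check further below admits such messages). Both documents in this sketch yield the same session id; the literal values are made up for illustration:

    // Sketch: hello variants tolerated by extractSessionId() after this change.
    final String namespacedHello =
            "<hello xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\"><session-id>4</session-id></hello>";
    final String bareHello =
            "<hello><session-id>4</session-id></hello>";
    // Parsing either document and passing it to extractSessionId() returns 4:
    // the first is matched by sessionIdXPath, the second only by sessionIdXPathNoNamespace.
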
index 4ca3c99e813c0711865a3ef704f60e52014c8c93..1982615173ec2aa7d36a39b7eb0733f759d55299 100644 (file)
@@ -22,8 +22,8 @@
                   <name>yang-schema-service</name>
               </root-schema-service>
               <dom-broker xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:mapper">
-                  <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-async-data-broker</type>
-                  <name>inmemory-data-broker</name>
+                  <type xmlns:dom="urn:opendaylight:params:xml:ns:yang:controller:md:sal:dom">dom:dom-broker-osgi-registry</type>
+                  <name>dom-broker</name>
               </dom-broker>
               <mapper-aggregator xmlns="urn:opendaylight:params:xml:ns:yang:controller:netconf:mdsal:mapper">
                   <type xmlns:prefix="urn:opendaylight:params:xml:ns:yang:controller:netconf:north:mapper">prefix:netconf-mapper-registry</type>
index d02cb432cbe7411898d93ecee33afff4206b20bb..961c9f57c2e9bc71463a472fde46cfd5f77ec166 100644 (file)
@@ -15,7 +15,7 @@ import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
 import org.opendaylight.controller.netconf.api.monitoring.NetconfMonitoringService;
 import org.opendaylight.controller.netconf.api.xml.XmlNetconfConstants;
 import org.opendaylight.controller.netconf.util.exception.MissingNameSpaceException;
-import org.opendaylight.controller.netconf.util.mapping.AbstractLastNetconfOperation;
+import org.opendaylight.controller.netconf.util.mapping.AbstractSingletonNetconfOperation;
 import org.opendaylight.controller.netconf.util.xml.XmlElement;
 import org.opendaylight.controller.netconf.util.xml.XmlUtil;
 import org.slf4j.Logger;
@@ -23,7 +23,7 @@ import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 
-public class GetSchema extends AbstractLastNetconfOperation {
+public class GetSchema extends AbstractSingletonNetconfOperation {
     public static final String GET_SCHEMA = "get-schema";
     public static final String IDENTIFIER = "identifier";
     public static final String VERSION = "version";
index cda940f9b7775a6efab1c13d8f9885144246f485..064ae72bc7c512db762a45007a73e9aeebebbe45 100644 (file)
@@ -180,7 +180,7 @@ public class AsyncSshHandler extends ChannelOutboundHandlerAdapter {
 
     @Override
     public synchronized void connect(final ChannelHandlerContext ctx, final SocketAddress remoteAddress, final SocketAddress localAddress, final ChannelPromise promise) throws Exception {
-        LOG.debug("XXX session connecting on channel {}. promise: {} ", ctx.channel(), connectPromise);
+        LOG.debug("SSH session connecting on channel {}. promise: {} ", ctx.channel(), connectPromise);
         this.connectPromise = promise;
         startSsh(ctx, remoteAddress);
     }
index 4491e763b3b39cde75e71abffe1d06a987fd31ef..3e64e93ed74875b71738beca56db8d224b10dead 100644 (file)
@@ -7,7 +7,12 @@
  */
 package org.opendaylight.controller.netconf.util.mapping;
 
+import org.opendaylight.controller.netconf.api.NetconfDocumentedException;
 import org.opendaylight.controller.netconf.mapping.api.HandlingPriority;
+import org.opendaylight.controller.netconf.mapping.api.NetconfOperationChainedExecution;
+import org.opendaylight.controller.netconf.util.xml.XmlElement;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
 
 public abstract class AbstractSingletonNetconfOperation extends AbstractLastNetconfOperation {
 
@@ -15,6 +20,12 @@ public abstract class AbstractSingletonNetconfOperation extends AbstractLastNetc
         super(netconfSessionIdForReporting);
     }
 
+    @Override
+    protected Element handle(Document document, XmlElement operationElement,
+                             NetconfOperationChainedExecution subsequentOperation) throws NetconfDocumentedException {
+        return handleWithNoSubsequentOperations(document, operationElement);
+    }
+
     @Override
     protected HandlingPriority getHandlingPriority() {
         return HandlingPriority.HANDLE_WITH_MAX_PRIORITY;
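
Editor's note: the override added above makes a "singleton" operation reply directly and ignore any chained execution it is handed; GetSchema, changed earlier in this diff, now inherits that behaviour. Below is a toy model of the pattern. Every type name is invented for illustration, and it assumes the parent "last" operation rejects non-terminating chains, which this diff does not show.

// Toy model of the "singleton" operation pattern; not the controller's API.
public class SingletonOperationSketch {

    interface ChainedExecution {
        boolean isExecutionTermination();
    }

    abstract static class LastOperation {
        String handle(String request, ChainedExecution subsequent) {
            // A "last" operation insists on being at the end of the chain.
            if (!subsequent.isExecutionTermination()) {
                throw new IllegalStateException("Subsequent operations are not supported");
            }
            return handleWithNoSubsequentOperations(request);
        }

        abstract String handleWithNoSubsequentOperations(String request);
    }

    abstract static class SingletonOperation extends LastOperation {
        @Override
        String handle(String request, ChainedExecution subsequent) {
            // Singleton variant: ignore the chain entirely and answer directly,
            // mirroring the handle(...) override added in the diff.
            return handleWithNoSubsequentOperations(request);
        }
    }

    public static void main(String[] args) {
        SingletonOperation getSchema = new SingletonOperation() {
            @Override
            String handleWithNoSubsequentOperations(String request) {
                return "<rpc-reply>schema for " + request + "</rpc-reply>";
            }
        };
        // A chain that is not yet terminated would make a plain "last" operation fail;
        // the singleton operation replies regardless.
        ChainedExecution pending = () -> false;
        System.out.println(getSchema.handle("module-a", pending));
    }
}
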
index 5cd17a2331e293c23d064c9acd535e021de43c8e..404885db7e9a497a243abd12d468df7c48f8b6ae 100644 (file)
@@ -94,9 +94,9 @@ public final class NetconfHelloMessage extends NetconfMessage {
     private static boolean isHelloMessage(final Document document) {
         XmlElement element = XmlElement.fromDomElement(document.getDocumentElement());
         try {
+            // accept even if hello has no namespace
             return element.getName().equals(HELLO_TAG) &&
-                   element.hasNamespace() &&
-                   element.getNamespace().equals(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0);
+                    (!element.hasNamespace() || element.getNamespace().equals(XmlNetconfConstants.URN_IETF_PARAMS_XML_NS_NETCONF_BASE_1_0));
         } catch (MissingNameSpaceException e) {
             // Cannot happen, since we check for hasNamespace
             throw new IllegalStateException(e);
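
Editor's note: the relaxed check above accepts a <hello> that carries no namespace at all. A minimal plain-DOM equivalent follows; class and method names are illustrative, since the project uses its XmlElement helper instead.

// Minimal check: root element named "hello", with either no namespace or the base-1.0 namespace.
import java.io.StringReader;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.xml.sax.InputSource;

public class HelloCheckSketch {

    private static final String BASE_NS = "urn:ietf:params:xml:ns:netconf:base:1.0";

    static boolean isHelloMessage(Document document) {
        Element root = document.getDocumentElement();
        String ns = root.getNamespaceURI(); // null for a namespace-less element
        return "hello".equals(root.getLocalName()) && (ns == null || BASE_NS.equals(ns));
    }

    public static void main(String[] args) throws Exception {
        DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
        dbf.setNamespaceAware(true);
        Document withNs = dbf.newDocumentBuilder()
                .parse(new InputSource(new StringReader("<hello xmlns=\"" + BASE_NS + "\"/>")));
        Document withoutNs = dbf.newDocumentBuilder()
                .parse(new InputSource(new StringReader("<hello/>")));
        System.out.println(isHelloMessage(withNs));    // true
        System.out.println(isHelloMessage(withoutNs)); // true, accepted since this change
    }
}
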
index 61b23202c3a134c27d975279e2f29f7d7eefefa0..3c6b6ccab9efa0a0a28b5630da5d5c9f30157610 100644 (file)
@@ -9,6 +9,7 @@
 package org.opendaylight.controller.netconf.util.messages;
 
 import com.google.common.base.Function;
+import com.google.common.base.Optional;
 import com.google.common.collect.Collections2;
 import java.util.Collection;
 import java.util.List;
@@ -59,9 +60,13 @@ public final class NetconfMessageUtil {
 
     public static Collection<String> extractCapabilitiesFromHello(Document doc) throws NetconfDocumentedException {
         XmlElement responseElement = XmlElement.fromDomDocument(doc);
-        XmlElement capabilitiesElement = responseElement
-                .getOnlyChildElementWithSameNamespace(XmlNetconfConstants.CAPABILITIES);
-        List<XmlElement> caps = capabilitiesElement.getChildElements(XmlNetconfConstants.CAPABILITY);
+        // Extract the <capabilities> child element from <hello>, preferring the same namespace with a fallback to no namespace
+        Optional<XmlElement> capabilitiesElement = responseElement
+                .getOnlyChildElementWithSameNamespaceOptionally(XmlNetconfConstants.CAPABILITIES)
+                .or(responseElement
+                        .getOnlyChildElementOptionally(XmlNetconfConstants.CAPABILITIES));
+
+        List<XmlElement> caps = capabilitiesElement.get().getChildElements(XmlNetconfConstants.CAPABILITY);
         return Collections2.transform(caps, new Function<XmlElement, String>() {
 
             @Override
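
Editor's note: the capabilities lookup above now tries the namespace-qualified child first and falls back to a plain name match via Guava's Optional.or. A self-contained sketch of that pattern follows; the lookup helpers are stand-ins for XmlElement's getOnlyChildElement*Optionally methods.

// Sketch of the Optional.or fallback pattern, using Guava's Optional as in the diff.
import com.google.common.base.Optional;

public class CapabilitiesLookupSketch {

    static Optional<String> lookupWithSameNamespace(boolean present) {
        return present ? Optional.of("<capabilities> (base-1.0 namespace)") : Optional.<String>absent();
    }

    static Optional<String> lookupByNameOnly(boolean present) {
        return present ? Optional.of("<capabilities> (no namespace)") : Optional.<String>absent();
    }

    static String findCapabilities(boolean namespacedPresent, boolean plainPresent) {
        Optional<String> capabilities =
                lookupWithSameNamespace(namespacedPresent).or(lookupByNameOnly(plainPresent));
        // Note: the diff calls get() directly, so a <hello> with no <capabilities> element
        // at all would surface as Optional's IllegalStateException.
        return capabilities.get();
    }

    public static void main(String[] args) {
        System.out.println(findCapabilities(true, false));  // namespace-qualified match wins
        System.out.println(findCapabilities(false, true));  // falls back to the plain match
    }
}
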
index 64aeebd54205c658cad64589994a436282cbdb5b..6855aa73c3685c2e1b1b40e7a42b37c6f653063d 100644 (file)
@@ -11,6 +11,7 @@ package org.opendaylight.controller.netconf.util.osgi;
 import com.google.common.base.Optional;
 import io.netty.channel.local.LocalAddress;
 import java.net.InetSocketAddress;
+import java.util.concurrent.TimeUnit;
 import org.osgi.framework.BundleContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -32,7 +33,7 @@ public final class NetconfConfigUtil {
     private static final String PRIVATE_KEY_PATH_PROP = ".pk.path";
 
     private static final String CONNECTION_TIMEOUT_MILLIS_PROP = "connectionTimeoutMillis";
-    private static final long DEFAULT_TIMEOUT_MILLIS = 5000;
+    public static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.SECONDS.toMillis(30);
     private static final LocalAddress netconfLocalAddress = new LocalAddress("netconf");
 
     public static LocalAddress getNetconfLocalAddress() {
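
Editor's note: the default connection timeout moves from a hard-coded 5000 ms to TimeUnit.SECONDS.toMillis(30) and becomes public so the test below can reference it. A minimal sketch of the fallback behaviour that test exercises; extractTimeoutMillis here is a stand-in, not the real NetconfConfigUtil method body.

// Empty or unparsable "connectionTimeoutMillis" falls back to the 30-second default.
import java.util.concurrent.TimeUnit;

public class TimeoutFallbackSketch {

    public static final long DEFAULT_TIMEOUT_MILLIS = TimeUnit.SECONDS.toMillis(30);

    static long extractTimeoutMillis(String rawProperty) {
        if (rawProperty == null || rawProperty.isEmpty()) {
            return DEFAULT_TIMEOUT_MILLIS;
        }
        try {
            return Long.parseLong(rawProperty);
        } catch (NumberFormatException e) {
            return DEFAULT_TIMEOUT_MILLIS;
        }
    }

    public static void main(String[] args) {
        System.out.println(extractTimeoutMillis(""));     // 30000: empty -> default
        System.out.println(extractTimeoutMillis("a"));    // 30000: unparsable -> default
        System.out.println(extractTimeoutMillis("1500")); // 1500: explicit value wins
    }
}
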
index 1a701057aa0ac8a4b2addc87af37b58582b24a46..a50d7fc61fbbee72e81529970d996bbad0bc2b9f 100644 (file)
@@ -35,10 +35,10 @@ public class NetconfConfigUtilTest {
         assertEquals(NetconfConfigUtil.getNetconfLocalAddress(), new LocalAddress("netconf"));
 
         doReturn("").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
-        assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+        assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), NetconfConfigUtil.DEFAULT_TIMEOUT_MILLIS);
 
         doReturn("a").when(bundleContext).getProperty("netconf.connectionTimeoutMillis");
-        assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), 5000);
+        assertEquals(NetconfConfigUtil.extractTimeoutMillis(bundleContext), NetconfConfigUtil.DEFAULT_TIMEOUT_MILLIS);
     }
 
     @Test